From 54001eac0982e8c166c82a28d85bb1229d23b4b9 Mon Sep 17 00:00:00 2001
From: Coleen Iona Quadros
Date: Tue, 30 Apr 2024 13:06:23 +0200
Subject: [PATCH 01/33] add managed cluster to e2e

Signed-off-by: Coleen Iona Quadros
---
 cicd-scripts/run-e2e-tests.sh | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/cicd-scripts/run-e2e-tests.sh b/cicd-scripts/run-e2e-tests.sh
index 3071db8d6..49f964a8b 100755
--- a/cicd-scripts/run-e2e-tests.sh
+++ b/cicd-scripts/run-e2e-tests.sh
@@ -39,7 +39,7 @@ else
 fi

 kubecontext=$(kubectl config current-context)
-cluster_name="local-cluster"
+hub_cluster_name="local-cluster"

 if [[ -n ${IS_KIND_ENV} ]]; then
   clusterServerURL="https://127.0.0.1:32806"
@@ -67,7 +67,7 @@ if [[ -n ${IS_KIND_ENV} ]]; then
   printf "\n  grafanaHost: grafana-test" >>${OPTIONSFILE}
 fi
 printf "\n  clusters:" >>${OPTIONSFILE}
-printf "\n    - name: ${cluster_name}" >>${OPTIONSFILE}
+printf "\n    - name: ${hub_cluster_name}" >>${OPTIONSFILE}
 if [[ -n ${IS_KIND_ENV} ]]; then
   printf "\n      clusterServerURL: ${clusterServerURL}" >>${OPTIONSFILE}
 fi
@@ -75,6 +75,18 @@ printf "\n      baseDomain: ${base_domain}" >>${OPTIONSFILE}
 printf "\n      kubeconfig: ${kubeconfig_hub_path}" >>${OPTIONSFILE}
 printf "\n      kubecontext: ${kubecontext}" >>${OPTIONSFILE}

+kubeconfig_managed_path="${SHARED_DIR}/managed-1.kc"
+if [[ -z ${IS_KIND_ENV} && -f "${kubeconfig_managed_path}" ]]; then
+  managed_cluster_name="managed-cluster-1"
+  kubecontext_managed=$(kubectl --kubeconfig="${kubeconfig_managed_path}" config current-context)
+  app_domain_managed=$(kubectl -n openshift-ingress-operator --kubeconfig="${kubeconfig_managed_path}" get ingresscontrollers default -ojsonpath='{.status.domain}')
+  base_domain_managed="${app_domain_managed#apps.}"
+  printf "\n    - name: ${managed_cluster_name}" >>${OPTIONSFILE}
+  printf "\n      baseDomain: ${base_domain_managed}" >>${OPTIONSFILE}
+  printf "\n      kubeconfig: ${kubeconfig_managed_path}" >>${OPTIONSFILE}
+  printf "\n      kubecontext: ${kubecontext_managed}" >>${OPTIONSFILE}
+fi
+
 if command -v ginkgo &>/dev/null; then
   GINKGO_CMD=ginkgo
 else

From 63dbb4b66a9f61200dccd8887f700599385753d3 Mon Sep 17 00:00:00 2001
From: Coleen Iona Quadros
Date: Tue, 30 Apr 2024 13:10:58 +0200
Subject: [PATCH 02/33] lint

Signed-off-by: Coleen Iona Quadros
---
 cicd-scripts/run-e2e-tests.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cicd-scripts/run-e2e-tests.sh b/cicd-scripts/run-e2e-tests.sh
index 49f964a8b..282bfe208 100755
--- a/cicd-scripts/run-e2e-tests.sh
+++ b/cicd-scripts/run-e2e-tests.sh
@@ -76,7 +76,7 @@ printf "\n      kubeconfig: ${kubeconfig_hub_path}" >>${OPTIONSFILE}
 printf "\n      kubecontext: ${kubecontext}" >>${OPTIONSFILE}

 kubeconfig_managed_path="${SHARED_DIR}/managed-1.kc"
-if [[ -z ${IS_KIND_ENV} && -f "${kubeconfig_managed_path}" ]]; then
+if [[ -z ${IS_KIND_ENV} && -f ${kubeconfig_managed_path} ]]; then
   managed_cluster_name="managed-cluster-1"
   kubecontext_managed=$(kubectl --kubeconfig="${kubeconfig_managed_path}" config current-context)
   app_domain_managed=$(kubectl -n openshift-ingress-operator --kubeconfig="${kubeconfig_managed_path}" get ingresscontrollers default -ojsonpath='{.status.domain}')

From 7f2656813686ccf7d4ed08a2068c97e3a3107ab6 Mon Sep 17 00:00:00 2001
From: Coleen Iona Quadros
Date: Thu, 2 May 2024 16:48:32 +0200
Subject: [PATCH 03/33] refactor to accommodate managed cluster

Signed-off-by: Coleen Iona Quadros
---
 .../pkg/tests/observability_certrenew_test.go |  22 ++--
 .../observability_endpoint_preserve_test.go   | 102 +++++++++---------
tests/pkg/utils/client.go | 7 ++ tests/pkg/utils/mco_configmaps.go | 8 +- tests/pkg/utils/mco_deployments.go | 41 +------ 5 files changed, 75 insertions(+), 105 deletions(-) diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index 3e2b303bb..c5e765759 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -15,19 +15,9 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +func runCertRenewTests(clusterConfig utils.Cluster) { BeforeEach(func() { - hubClient = utils.NewKubeClient( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - clusterName := utils.GetManagedClusterName(testOptions) - if clusterName == hubManagedClusterName { + if clusterConfig.Name == hubManagedClusterName { namespace = hubMetricsCollectorNamespace isHub = false } @@ -164,7 +154,7 @@ var _ = Describe("Observability:", func() { } // debug code to check label "cert/time-restarted" deployment, err := utils.GetDeployment( - testOptions, + clusterConfig, isHub, "metrics-collector-deployment", namespace, @@ -190,4 +180,10 @@ var _ = Describe("Observability:", func() { namespace = MCO_ADDON_NAMESPACE isHub = false }) +} + +var _ = Describe("Observability:", func() { + for _, clusterConfig := range testOptions.ManagedClusters { + runCertRenewTests(clusterConfig) + } }) diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index 446432e18..ab27aa0c6 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -5,6 +5,7 @@ package tests import ( + "fmt" "os" . 
"github.com/onsi/ginkgo" @@ -17,19 +18,9 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +func runMetricsCollectorTests(clusterConfig utils.Cluster) { BeforeEach(func() { - hubClient = utils.NewKubeClient( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - clusterName := utils.GetManagedClusterName(testOptions) - if clusterName == hubManagedClusterName { + if clusterConfig.Name == hubManagedClusterName { namespace = hubMetricsCollectorNamespace isHub = false } @@ -42,46 +33,49 @@ var _ = Describe("Observability:", func() { err error dep *appv1.Deployment ) - Eventually(func() error { - dep, err = utils.GetDeployment( - testOptions, - isHub, - "metrics-collector-deployment", - namespace, - ) - return err - }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) - - Eventually(func() error { - err = utils.DeleteDeployment( - testOptions, - isHub, - "metrics-collector-deployment", - namespace, - ) - return err - }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) - - Eventually(func() bool { - newDep, err = utils.GetDeployment( - testOptions, - isHub, - "metrics-collector-deployment", - namespace, - ) - if err == nil { - if dep.ObjectMeta.ResourceVersion != newDep.ObjectMeta.ResourceVersion { - return true + By(fmt.Sprintf("Handling cluster: %s", clusterConfig.Name), func() { + Eventually(func() error { + dep, err = utils.GetDeployment( + clusterConfig, + isHub, + "metrics-collector-deployment", + namespace, + ) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + + Eventually(func() error { + err = utils.DeleteDeployment( + testOptions, + isHub, + "metrics-collector-deployment", + namespace, + ) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + + Eventually(func() bool { + newDep, err = utils.GetDeployment( + clusterConfig, + isHub, + "metrics-collector-deployment", + namespace, + ) + if err == nil { + if dep.ObjectMeta.ResourceVersion != newDep.ObjectMeta.ResourceVersion { + return true + } } - } - return false - }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + return false + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + + }) }) It("[Stable] Updating metrics-collector deployment", func() { updateSaName := "test-serviceaccount" Eventually(func() error { newDep, err = utils.GetDeployment( - testOptions, + clusterConfig, isHub, "metrics-collector-deployment", namespace, @@ -91,7 +85,7 @@ var _ = Describe("Observability:", func() { } newDep.Spec.Template.Spec.ServiceAccountName = updateSaName newDep, err = utils.UpdateDeployment( - testOptions, + clusterConfig, isHub, "metrics-collector-deployment", namespace, @@ -102,7 +96,7 @@ var _ = Describe("Observability:", func() { Eventually(func() bool { revertDep, err := utils.GetDeployment( - testOptions, + clusterConfig, isHub, "metrics-collector-deployment", namespace, @@ -167,7 +161,7 @@ var _ = Describe("Observability:", func() { ) Eventually(func() error { err, cm = utils.GetConfigMap( - testOptions, + clusterConfig, isHub, "metrics-collector-serving-certs-ca-bundle", namespace, @@ -176,7 +170,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*1, 
EventuallyIntervalSecond*1).Should(Succeed()) Eventually(func() error { err = utils.DeleteConfigMap( - testOptions, + clusterConfig, isHub, "metrics-collector-serving-certs-ca-bundle", namespace, @@ -186,7 +180,7 @@ var _ = Describe("Observability:", func() { newCm := &v1.ConfigMap{} Eventually(func() bool { err, newCm = utils.GetConfigMap( - testOptions, + clusterConfig, isHub, "metrics-collector-serving-certs-ca-bundle", namespace, @@ -215,4 +209,10 @@ var _ = Describe("Observability:", func() { isHub = false }) +} + +var _ = Describe("Observability:", func() { + for _, clusterConfig := range testOptions.ManagedClusters { + runMetricsCollectorTests(clusterConfig) + } }) diff --git a/tests/pkg/utils/client.go b/tests/pkg/utils/client.go index cb606ee11..3ea7e6bd5 100644 --- a/tests/pkg/utils/client.go +++ b/tests/pkg/utils/client.go @@ -23,6 +23,13 @@ func getKubeClient(opt TestOptions, isHub bool) kubernetes.Interface { return clientKube } +func getKubeClientForCluster(clusterConfig Cluster, isHub bool) kubernetes.Interface { + return NewKubeClient( + clusterConfig.ClusterServerURL, + clusterConfig.KubeConfig, + clusterConfig.KubeContext) +} + func GetKubeClientDynamic(opt TestOptions, isHub bool) dynamic.Interface { url := opt.HubCluster.ClusterServerURL kubeConfig := opt.KubeConfig diff --git a/tests/pkg/utils/mco_configmaps.go b/tests/pkg/utils/mco_configmaps.go index cb47c2b9e..7f07c6212 100644 --- a/tests/pkg/utils/mco_configmaps.go +++ b/tests/pkg/utils/mco_configmaps.go @@ -38,9 +38,9 @@ func CreateConfigMap(opt TestOptions, isHub bool, cm *corev1.ConfigMap) error { return err } -func GetConfigMap(opt TestOptions, isHub bool, name string, +func GetConfigMap(clusterConfig Cluster, isHub bool, name string, namespace string) (error, *corev1.ConfigMap) { - clientKube := getKubeClient(opt, isHub) + clientKube := getKubeClientForCluster(clusterConfig, isHub) cm, err := clientKube.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed to get configmap %s in namespace %s due to %v", name, namespace, err) @@ -48,8 +48,8 @@ func GetConfigMap(opt TestOptions, isHub bool, name string, return err, cm } -func DeleteConfigMap(opt TestOptions, isHub bool, name string, namespace string) error { - clientKube := getKubeClient(opt, isHub) +func DeleteConfigMap(clusterConfig Cluster, isHub bool, name string, namespace string) error { + clientKube := getKubeClientForCluster(clusterConfig, isHub) err := clientKube.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { klog.Errorf("Failed to delete configmap %s in namespace %s due to %v", name, namespace, err) diff --git a/tests/pkg/utils/mco_deployments.go b/tests/pkg/utils/mco_deployments.go index fa2841d83..0c698e723 100644 --- a/tests/pkg/utils/mco_deployments.go +++ b/tests/pkg/utils/mco_deployments.go @@ -6,16 +6,14 @@ package utils import ( "context" - "errors" - appv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" ) -func GetDeployment(opt TestOptions, isHub bool, name string, +func GetDeployment(clusterConfig Cluster, isHub bool, name string, namespace string) (*appv1.Deployment, error) { - clientKube := getKubeClient(opt, isHub) + clientKube := getKubeClientForCluster(clusterConfig, isHub) klog.V(1).Infof("Get deployment <%v> in namespace <%v>, isHub: <%v>", name, namespace, isHub) dep, err := clientKube.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { @@ -51,46 
+49,15 @@ func DeleteDeployment(opt TestOptions, isHub bool, name string, namespace string } func UpdateDeployment( - opt TestOptions, + clusterConfig Cluster, isHub bool, name string, namespace string, dep *appv1.Deployment) (*appv1.Deployment, error) { - clientKube := getKubeClient(opt, isHub) + clientKube := getKubeClientForCluster(clusterConfig, isHub) updateDep, err := clientKube.AppsV1().Deployments(namespace).Update(context.TODO(), dep, metav1.UpdateOptions{}) if err != nil { klog.Errorf("Failed to update deployment %s in namespace %s due to %v", name, namespace, err) } return updateDep, err } - -func UpdateDeploymentReplicas( - opt TestOptions, - deployName, crProperty string, - desiredReplicas, expectedReplicas int32, -) error { - clientDynamic := GetKubeClientDynamic(opt, true) - deploy, err := GetDeployment(opt, true, deployName, MCO_NAMESPACE) - if err != nil { - return err - } - deploy.Spec.Replicas = &desiredReplicas - _, err = UpdateDeployment(opt, true, deployName, MCO_NAMESPACE, deploy) - if err != nil { - return err - } - - obs, err := clientDynamic.Resource(NewMCOMObservatoriumGVR()). - Namespace(MCO_NAMESPACE). - Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if err != nil { - return err - } - thanos := obs.Object["spec"].(map[string]interface{})["thanos"] - currentReplicas := thanos.(map[string]interface{})[crProperty].(map[string]interface{})["replicas"].(int64) - if int(currentReplicas) != int(expectedReplicas) { - klog.Errorf("Failed to update deployment %s replicas to %v", deployName, expectedReplicas) - return errors.New("the replicas was not updated successfully") - } - return nil -} From edf7c7eeb0596215071260a2bed1eed30fead596 Mon Sep 17 00:00:00 2001 From: Coleen Iona Quadros Date: Fri, 3 May 2024 10:43:36 +0200 Subject: [PATCH 04/33] fix Signed-off-by: Coleen Iona Quadros --- tests/pkg/tests/observability_dashboard_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 80cd3417e..0feb9270c 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -71,7 +71,7 @@ var _ = Describe("Observability:", func() { It("[P2][Sev2][observability][Stable] Should have no custom dashboard in grafana after related configmap removed (dashboard/g0)", func() { By("Deleting custom dashboard configmap") - err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) + err = utils.DeleteConfigMap(testOptions.ManagedClusters[0], true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) Eventually(func() bool { _, result := utils.ContainDashboard(testOptions, updateDashboardTitle) From a50d02da1e65d446872698f0a78f111e75a22f5f Mon Sep 17 00:00:00 2001 From: Coleen Iona Quadros Date: Fri, 3 May 2024 12:09:57 +0200 Subject: [PATCH 05/33] refactor Signed-off-by: Coleen Iona Quadros --- tests/pkg/tests/observability_addon_test.go | 9 --------- tests/pkg/tests/observability_alert_test.go | 5 ----- tests/pkg/tests/observability_dashboard_test.go | 12 ------------ .../tests/observability_endpoint_preserve_test.go | 2 ++ tests/pkg/tests/observability_export_test.go | 12 ------------ tests/pkg/tests/observability_grafana_test.go | 12 ------------ tests/pkg/tests/observability_manifestwork_test.go | 9 --------- tests/pkg/tests/observability_metrics_test.go | 5 ----- .../observability_observatorium_preserve_test.go | 5 ----- 9 files changed, 2 insertions(+), 69 deletions(-) diff 
--git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 321748964..e5edff902 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -19,15 +19,6 @@ import ( var _ = Describe("Observability:", func() { BeforeEach(func() { - hubClient = utils.NewKubeClient( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) if utils.GetManagedClusterName(testOptions) == hubManagedClusterName { Skip("Skip the case for local-cluster since no observability addon") } diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index a25df77e5..7a2766106 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -39,11 +39,6 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) }) statefulsetLabels := [...]string{ ALERTMANAGER_LABEL, diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 0feb9270c..4518f71c7 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -19,18 +19,6 @@ const ( ) var _ = Describe("Observability:", func() { - BeforeEach(func() { - hubClient = utils.NewKubeClient( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - }) - It("[P2][Sev2][observability][Stable] Should have custom dashboard which defined in configmap (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index ab27aa0c6..6e3644281 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -6,6 +6,7 @@ package tests import ( "fmt" + "k8s.io/klog/v2" "os" . 
"github.com/onsi/ginkgo" @@ -213,6 +214,7 @@ func runMetricsCollectorTests(clusterConfig utils.Cluster) { var _ = Describe("Observability:", func() { for _, clusterConfig := range testOptions.ManagedClusters { + klog.Error("Coleen Running metrics collector tests for cluster: ", clusterConfig.Name runMetricsCollectorTests(clusterConfig) } }) diff --git a/tests/pkg/tests/observability_export_test.go b/tests/pkg/tests/observability_export_test.go index 43966bad3..634a1a1b1 100644 --- a/tests/pkg/tests/observability_export_test.go +++ b/tests/pkg/tests/observability_export_test.go @@ -17,18 +17,6 @@ import ( ) var _ = Describe("Observability:", func() { - BeforeEach(func() { - hubClient = utils.NewKubeClient( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - }) - JustBeforeEach(func() { Eventually(func() error { clusters, clusterError = utils.ListManagedClusters(testOptions) diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index 818656160..72fba5f1b 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -14,18 +14,6 @@ import ( ) var _ = Describe("Observability:", func() { - BeforeEach(func() { - hubClient = utils.NewKubeClient( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - }) - It("@BVT - [P1][Sev1][observability][Stable] Should have metric data in grafana console (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index f654a6ebc..418fbceb1 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -17,15 +17,6 @@ import ( var _ = Describe("Observability:", func() { BeforeEach(func() { - hubClient = utils.NewKubeClient( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) if utils.GetManagedClusterName(testOptions) == hubManagedClusterName { Skip("Skip the case for local-cluster since no observability addon") } diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 6e6ed9c38..22f9586ea 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -33,11 +33,6 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext) - - dynClient = utils.NewKubeClientDynamic( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) }) JustBeforeEach(func() { diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index d2786f420..bae4c7493 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ 
b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -16,11 +16,6 @@ import ( var _ = Describe("Observability:", func() { BeforeEach(func() { - hubClient = utils.NewKubeClient( - testOptions.HubCluster.ClusterServerURL, - testOptions.KubeConfig, - testOptions.HubCluster.KubeContext) - dynClient = utils.NewKubeClientDynamic( testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, From 486aa705da1a48b3f474b9d29515a1b58edc4482 Mon Sep 17 00:00:00 2001 From: Coleen Iona Quadros Date: Fri, 3 May 2024 12:27:14 +0200 Subject: [PATCH 06/33] syntax Signed-off-by: Coleen Iona Quadros --- tests/pkg/tests/observability_endpoint_preserve_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index 6e3644281..85a60bc37 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -214,7 +214,7 @@ func runMetricsCollectorTests(clusterConfig utils.Cluster) { var _ = Describe("Observability:", func() { for _, clusterConfig := range testOptions.ManagedClusters { - klog.Error("Coleen Running metrics collector tests for cluster: ", clusterConfig.Name + klog.Error("Coleen Running metrics collector tests for cluster: ", clusterConfig.Name) runMetricsCollectorTests(clusterConfig) } }) From 57804ac43bae7b4067becbe51a7e6c8c649aee99 Mon Sep 17 00:00:00 2001 From: Coleen Iona Quadros Date: Thu, 23 May 2024 11:21:50 +0200 Subject: [PATCH 07/33] update kind tests to add new managed cluster Signed-off-by: Coleen Iona Quadros --- Makefile | 3 + cicd-scripts/.hub-kubeconfig | 0 cicd-scripts/setup-e2e-tests.sh | 101 +++++++++--- examples/mco/e2e/v1beta1/observability.yaml | 32 ++++ examples/mco/e2e/v1beta1/observability.yaml-e | 49 ++++++ examples/mco/e2e/v1beta2/observability.yaml | 23 +++ examples/mco/e2e/v1beta2/observability.yaml-e | 146 ++++++++++++++++++ examples/minio-tls/minio-pvc.yaml | 2 +- examples/minio-tls/minio-pvc.yaml-e | 14 ++ examples/minio/minio-pvc.yaml | 2 +- examples/minio/minio-pvc.yaml-e | 14 ++ go.mod | 2 +- .../config/manager/kustomization.yaml | 2 +- .../manifests/base/grafana/deployment.yaml | 23 ++- .../manifests/base/grafana/deployment.yaml-e | 145 +++++++++++++++++ scripts/bootstrap-kind-env.sh | 10 ++ test.sh | 32 ++++ .../observability_endpoint_preserve_test.go | 3 +- tests/pkg/utils/mco_deployments.go | 1 + tests/pkg/utils/mco_managedcluster.go | 7 +- tests/pkg/utils/mco_oba.go | 42 ----- .../run-in-kind/kind/kind-managed.config.yaml | 17 ++ tests/run-in-kind/run-e2e-in-kind.sh | 5 +- 23 files changed, 589 insertions(+), 86 deletions(-) create mode 100644 cicd-scripts/.hub-kubeconfig create mode 100644 examples/mco/e2e/v1beta1/observability.yaml-e create mode 100644 examples/mco/e2e/v1beta2/observability.yaml-e create mode 100644 examples/minio-tls/minio-pvc.yaml-e create mode 100644 examples/minio/minio-pvc.yaml-e create mode 100644 operators/multiclusterobservability/manifests/base/grafana/deployment.yaml-e create mode 100755 test.sh create mode 100644 tests/run-in-kind/kind/kind-managed.config.yaml diff --git a/Makefile b/Makefile index b965c65f6..b262e4a03 100644 --- a/Makefile +++ b/Makefile @@ -98,6 +98,7 @@ kind-env: @echo "Setting up KinD cluster" @./scripts/bootstrap-kind-env.sh @echo "Cluster has been created" + kind export kubeconfig --name=managed kind export kubeconfig --name=hub kubectl label node hub-control-plane node-role.kubernetes.io/master='' @@ 
-108,6 +109,8 @@ mco-kind-env: kind-env @echo "Local environment has been set up" @echo "Installing MCO" @kind get kubeconfig --name hub > /tmp/hub.yaml + @kind get kubeconfig --name managed > /tmp/managed.yaml + @kind get kubeconfig --name hub --internal > ./.hub-kubeconfig KUBECONFIG=/tmp/hub.yaml IS_KIND_ENV=true KUSTOMIZE_VERSION=${KUSTOMIZE_VERSION} ./cicd-scripts/setup-e2e-tests.sh diff --git a/cicd-scripts/.hub-kubeconfig b/cicd-scripts/.hub-kubeconfig new file mode 100644 index 000000000..e69de29bb diff --git a/cicd-scripts/setup-e2e-tests.sh b/cicd-scripts/setup-e2e-tests.sh index 028a7a2df..30f75bb9d 100755 --- a/cicd-scripts/setup-e2e-tests.sh +++ b/cicd-scripts/setup-e2e-tests.sh @@ -22,7 +22,7 @@ AGENT_NS="open-cluster-management-agent" HUB_NS="open-cluster-management-hub" OBSERVABILITY_NS="open-cluster-management-observability" IMAGE_REPO="quay.io/stolostron" -export MANAGED_CLUSTER="local-cluster" # registration-operator needs this +#export MANAGED_CLUSTER="local-cluster" # registration-operator needs this SED_COMMAND=${SED}' -i-e -e' @@ -43,32 +43,40 @@ deploy_hub_spoke_core() { REGISTRATION_LATEST_SNAPSHOT='2.4.9-SNAPSHOT-2022-11-17-20-19-31' make cluster-ip IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT} make deploy IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT} - - # wait until hub and spoke are ready + # wait until hub and spoke are ready wait_for_deployment_ready 10 60s ${HUB_NS} cluster-manager-registration-controller cluster-manager-registration-webhook cluster-manager-work-webhook wait_for_deployment_ready 10 60s ${AGENT_NS} klusterlet-registration-agent klusterlet-work-agent + } # approve the CSR for cluster join request approve_csr_joinrequest() { echo "wait for CSR for cluster join reqest is created..." - for i in {1..60}; do - # TODO(morvencao): remove the hard-coded cluster label - csrs=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER}) - if [[ -n ${csrs} ]]; then - csrnames=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER} -o jsonpath={.items..metadata.name}) - for csrname in ${csrnames}; do - echo "approve CSR: ${csrname}" - kubectl certificate approve ${csrname} + managed_clusters=("local-cluster" "managed-cluster-1") + + KUBECONFIG=/tmp/hub.yaml IS_KIND_ENV=true + #kubectl config use-context kind-hub + for MANAGED_CLUSTER in "${managed_clusters[@]}"; do + echo "Processing CSRs for ${MANAGED_CLUSTER}..." + for i in {1..60}; do + # TODO(morvencao): remove the hard-coded cluster label + # for loop for the case that multiple clusters are created + csrs=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER}) + if [[ -n ${csrs} ]]; then + csrnames=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER} -o jsonpath={.items..metadata.name}) + for csrname in ${csrnames}; do + echo "approve CSR: ${csrname}" + kubectl certificate approve ${csrname} + done + break + fi + if [[ ${i} -eq 60 ]]; then + echo "timeout wait for CSR is created." + exit 1 + fi + echo "retrying in 10s..." + sleep 10 done - break - fi - if [[ ${i} -eq 60 ]]; then - echo "timeout wait for CSR is created." - exit 1 - fi - echo "retrying in 10s..." 
-    sleep 10
   done

   for i in {1..20}; do
@@ -130,6 +138,7 @@ EOF

 # deploy the MCO operator via the kustomize resources
 deploy_mco_operator() {
+  kubectl config use-context kind-hub
   if [[ -n ${MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF} ]]; then
     cd ${ROOTDIR}/operators/multiclusterobservability/config/manager && kustomize edit set image quay.io/stolostron/multicluster-observability-operator=${MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF}
   else
@@ -216,10 +225,60 @@ wait_for_deployment_ready() {
   done
 }
+deploy_managed_cluster() {
+  echo "Setting Kubernetes context to the managed cluster..."
+
+
+  KUBECONFIG=/tmp/managed.yaml IS_KIND_ENV=true
+  kubectl config use-context kind-managed
+  export MANAGED_CLUSTER="managed-cluster-1"
+
+  cd ${ROOTDIR}
+  # we are pinned here so no need to re-fetch if we have the project locally.
+  if [[ ! -d "registration-operator" ]]; then
+    git clone --depth 1 -b release-2.4 https://github.com/stolostron/registration-operator.git
+  fi
+  cd registration-operator
+  REGISTRATION_LATEST_SNAPSHOT='2.4.9-SNAPSHOT-2022-11-17-20-19-31'
+  ${SED_COMMAND} "s~clusterName: cluster1$~clusterName: ${MANAGED_CLUSTER}~g" deploy/klusterlet/config/samples/operator_open-cluster-management_klusterlets.cr.yaml
+  make deploy-spoke IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT}
+  wait_for_deployment_ready 10 60s ${AGENT_NS} klusterlet-registration-agent klusterlet-work-agent
+  }
+
+deploy_hub_and_managed_cluster() {
+  cd $(dirname ${BASH_SOURCE})
+
+  set -e
+
+  hub=${CLUSTER1:-hub}
+  hub_name="local-cluster"
+  c1=${CLUSTER1:-managed}
+
+  hubctx="kind-${hub}"
+  c1ctx="kind-${c1}"
+
+  echo "Initialize the ocm hub cluster\n" # ./.hub-kubeconfig is default value of HUB_KUBECONFIG
+  clusteradm init --wait --context ${hubctx}
+  joincmd=$(clusteradm get token --context ${hubctx} | grep clusteradm)
+
+  echo "Join hub to hub\n"
+  $(echo ${joincmd} --force-internal-endpoint-lookup --wait --context ${hubctx} | sed "s/<cluster_name>/$hub_name/g")
+  KUBECONFIG=/tmp/managed.yaml IS_KIND_ENV=true
+  echo "Join cluster1 to hub\n"
+  $(echo ${joincmd} --force-internal-endpoint-lookup --wait --context ${c1ctx} | sed "s/<cluster_name>/$c1/g")
+
+  echo "Accept join of hub,cluster1"
+  KUBECONFIG=/tmp/hub.yaml IS_KIND_ENV=true
+  clusteradm accept --context ${hubctx} --clusters ${c1},${hub_name} --skip-approve-check
+
+  kubectl get managedclusters --all-namespaces --context ${hubctx}
+}

 # function execute is the main routine to do the actual work
 execute() {
-  deploy_hub_spoke_core
-  approve_csr_joinrequest
+#  deploy_hub_spoke_core
+#  approve_csr_joinrequest
+#  deploy_managed_cluster
+  deploy_hub_and_managed_cluster
   deploy_mco_operator
   deploy_grafana_test
   echo "OCM and MCO are installed successfully..."
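The deploy_hub_and_managed_cluster function above swaps the registration-operator bootstrap for clusteradm: "clusteradm get token" prints a ready-made "clusteradm join" command whose name argument is left as a <cluster_name> placeholder, each sed call substitutes the real cluster name before the command substitution executes it, and "clusteradm accept" then approves the pending join requests for both clusters in one step. A minimal standalone sketch of that flow, assuming kind clusters named "hub" and "managed" already exist and clusteradm is on the PATH (cluster names below are illustrative):

    #!/usr/bin/env bash
    # Sketch only: bootstrap an OCM hub and register two clusters with clusteradm.
    # Assumes kind clusters "hub" and "managed" already exist.
    set -euo pipefail

    hubctx="kind-hub"
    managedctx="kind-managed"

    # Install the cluster-manager on the hub and wait until it is ready.
    clusteradm init --wait --context "${hubctx}"

    # clusteradm prints a join command with a <cluster_name> placeholder.
    joincmd=$(clusteradm get token --context "${hubctx}" | grep clusteradm)

    # Register the hub against itself as local-cluster, then the managed cluster.
    # --force-internal-endpoint-lookup is needed on kind, where the API address
    # recorded in the kubeconfig is only reachable from the host network.
    $(echo "${joincmd}" --force-internal-endpoint-lookup --wait --context "${hubctx}" | sed "s/<cluster_name>/local-cluster/g")
    $(echo "${joincmd}" --force-internal-endpoint-lookup --wait --context "${managedctx}" | sed "s/<cluster_name>/managed-cluster-1/g")

    # Approve both join requests from the hub side.
    clusteradm accept --context "${hubctx}" --clusters managed-cluster-1,local-cluster

    kubectl get managedclusters --context "${hubctx}"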
diff --git a/examples/mco/e2e/v1beta1/observability.yaml b/examples/mco/e2e/v1beta1/observability.yaml index ee59f4ce4..0d55e03f6 100644 --- a/examples/mco/e2e/v1beta1/observability.yaml +++ b/examples/mco/e2e/v1beta1/observability.yaml @@ -3,6 +3,38 @@ kind: MultiClusterObservability metadata: name: observability annotations: + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-11-47-23 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-06-19-22-35 spec: nodeSelector: kubernetes.io/os: linux diff --git a/examples/mco/e2e/v1beta1/observability.yaml-e b/examples/mco/e2e/v1beta1/observability.yaml-e new file mode 100644 index 000000000..cf544ad34 --- /dev/null +++ b/examples/mco/e2e/v1beta1/observability.yaml-e @@ -0,0 +1,49 @@ +apiVersion: observability.open-cluster-management.io/v1beta1 +kind: MultiClusterObservability +metadata: + name: observability + annotations: + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-11-47-23 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: 
quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + test-env: kind-test + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-06-19-22-35 +spec: + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: {} + retentionResolutionRaw: 5d + retentionResolution5m: 14d + retentionResolution1h: 30d + storageConfigObject: + metricObjectStorage: + key: thanos.yaml + name: thanos-object-storage + statefulSetSize: 1Gi + statefulSetStorageClass: gp2 diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 8359da246..ab742dbeb 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -3,6 +3,29 @@ kind: MultiClusterObservability metadata: name: observability annotations: + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-11-47-23 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-06-19-22-35 spec: advanced: retentionConfig: diff --git a/examples/mco/e2e/v1beta2/observability.yaml-e b/examples/mco/e2e/v1beta2/observability.yaml-e new file mode 100644 index 000000000..16674cab8 --- /dev/null +++ b/examples/mco/e2e/v1beta2/observability.yaml-e @@ -0,0 +1,146 @@ +apiVersion: observability.open-cluster-management.io/v1beta2 +kind: 
MultiClusterObservability +metadata: + name: observability + annotations: + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-11-47-23 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 + mco-thanos-without-resources-requests: true + mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-06-19-22-35 +spec: + advanced: + retentionConfig: + blockDuration: 3h + cleanupInterval: 6m + deleteDelay: 50h + retentionInLocal: 5d + retentionResolution1h: 31d + retentionResolution5m: 15d + retentionResolutionRaw: 6d + observatoriumAPI: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + queryFrontend: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + query: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + compact: + resources: + limits: + cpu: 1 + memory: 2Gi + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + receive: + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + rule: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 1 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + store: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + storeMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 2 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + queryFrontendMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + grafana: + replicas: 3 + resources: + limits: + cpu: 1 + memory: 1Gi + alertmanager: + replicas: 2 + resources: + limits: + cpu: 100m + memory: 400Mi + rbacQueryProxy: + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: + enableMetrics: true + interval: 30 + resources: + limits: + cpu: 200m + memory: 700Mi + requests: + cpu: 10m + memory: 100Mi + storageConfig: + alertmanagerStorageSize: 1Gi + compactStorageSize: 1Gi + metricObjectStorage: + key: thanos.yaml + name: 
thanos-object-storage + receiveStorageSize: 1Gi + ruleStorageSize: 1Gi + storageClass: gp2 + storeStorageSize: 1Gi diff --git a/examples/minio-tls/minio-pvc.yaml b/examples/minio-tls/minio-pvc.yaml index a501dbf00..20023982d 100644 --- a/examples/minio-tls/minio-pvc.yaml +++ b/examples/minio-tls/minio-pvc.yaml @@ -6,7 +6,7 @@ metadata: name: minio namespace: open-cluster-management-observability spec: - storageClassName: gp3-csi + storageClassName: standard accessModes: - ReadWriteOnce resources: diff --git a/examples/minio-tls/minio-pvc.yaml-e b/examples/minio-tls/minio-pvc.yaml-e new file mode 100644 index 000000000..20023982d --- /dev/null +++ b/examples/minio-tls/minio-pvc.yaml-e @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app.kubernetes.io/name: minio + name: minio + namespace: open-cluster-management-observability +spec: + storageClassName: standard + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" diff --git a/examples/minio/minio-pvc.yaml b/examples/minio/minio-pvc.yaml index a501dbf00..20023982d 100644 --- a/examples/minio/minio-pvc.yaml +++ b/examples/minio/minio-pvc.yaml @@ -6,7 +6,7 @@ metadata: name: minio namespace: open-cluster-management-observability spec: - storageClassName: gp3-csi + storageClassName: standard accessModes: - ReadWriteOnce resources: diff --git a/examples/minio/minio-pvc.yaml-e b/examples/minio/minio-pvc.yaml-e new file mode 100644 index 000000000..20023982d --- /dev/null +++ b/examples/minio/minio-pvc.yaml-e @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app.kubernetes.io/name: minio + name: minio + namespace: open-cluster-management-observability +spec: + storageClassName: standard + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" diff --git a/go.mod b/go.mod index 996f2edd4..eca96f186 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,7 @@ require ( k8s.io/apimachinery v0.28.2 k8s.io/client-go v12.0.0+incompatible k8s.io/klog v1.0.0 + k8s.io/klog/v2 v2.100.1 k8s.io/kubectl v0.27.2 open-cluster-management.io/addon-framework v0.8.1-0.20231128122622-3bfdbffb237c open-cluster-management.io/api v0.12.1-0.20231130134655-97a8a92a7f30 @@ -163,7 +164,6 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiserver v0.27.2 // indirect k8s.io/component-base v0.27.2 // indirect - k8s.io/klog/v2 v2.100.1 // indirect k8s.io/kube-aggregator v0.26.1 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect diff --git a/operators/multiclusterobservability/config/manager/kustomization.yaml b/operators/multiclusterobservability/config/manager/kustomization.yaml index ae5e515ac..226eea9fd 100644 --- a/operators/multiclusterobservability/config/manager/kustomization.yaml +++ b/operators/multiclusterobservability/config/manager/kustomization.yaml @@ -6,6 +6,6 @@ kind: Kustomization images: - name: quay.io/stolostron/multicluster-observability-operator newName: quay.io/stolostron/multicluster-observability-operator - newTag: latest + newTag: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 patches: - path: manager_webhook_patch.yaml diff --git a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml index ede8d8f2b..e98490f25 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml +++ 
b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml @@ -2,21 +2,18 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - app: multicluster-observability-grafana - observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" - name: grafana + app: multicluster-observability-grafana-test + name: grafana-test namespace: open-cluster-management-observability spec: - replicas: 2 + replicas: 1 selector: matchLabels: - app: multicluster-observability-grafana - observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" + app: multicluster-observability-grafana-test template: metadata: labels: - app: multicluster-observability-grafana - observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" + app: multicluster-observability-grafana-test spec: affinity: podAntiAffinity: @@ -42,9 +39,9 @@ spec: containers: - args: - -config=/etc/grafana/grafana.ini - image: quay.io/stolostron/grafana:2.4.0-SNAPSHOT-2021-09-23-07-02-14 + image: quay.io/stolostron/grafana:2.11.0-SNAPSHOT-2024-05-14-04-35-15 imagePullPolicy: IfNotPresent - name: grafana + name: grafana-test ports: - containerPort: 3001 name: http @@ -72,7 +69,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: quay.io/stolostron/grafana-dashboard-loader:2.3.0-SNAPSHOT-2021-07-26-18-43-26 + image: quay.io/stolostron/grafana-dashboard-loader:2.11.0-SNAPSHOT-2024-05-14-04-35-15 imagePullPolicy: IfNotPresent resources: requests: @@ -133,11 +130,11 @@ spec: - name: grafana-datasources secret: defaultMode: 420 - secretName: grafana-datasources + secretName: grafana-datasources-test - name: grafana-config secret: defaultMode: 420 - secretName: grafana-config + secretName: grafana-config-test - name: tls-secret secret: defaultMode: 420 diff --git a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml-e b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml-e new file mode 100644 index 000000000..e98490f25 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml-e @@ -0,0 +1,145 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: multicluster-observability-grafana-test + name: grafana-test + namespace: open-cluster-management-observability +spec: + replicas: 1 + selector: + matchLabels: + app: multicluster-observability-grafana-test + template: + metadata: + labels: + app: multicluster-observability-grafana-test + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 70 + podAffinityTerm: + topologyKey: topology.kubernetes.io/zone + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - multicluster-observability-grafana + - weight: 30 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - multicluster-observability-grafana + containers: + - args: + - -config=/etc/grafana/grafana.ini + image: quay.io/stolostron/grafana:2.11.0-SNAPSHOT-2024-05-14-04-35-15 + imagePullPolicy: IfNotPresent + name: grafana-test + ports: + - containerPort: 3001 + name: http + protocol: TCP + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 4m + memory: 100Mi + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-storage + - mountPath: /etc/grafana/provisioning/datasources + name: grafana-datasources + - mountPath: /etc/grafana + name: grafana-config + securityContext: + privileged: 
false + readOnlyRootFilesystem: true + - name: grafana-dashboard-loader + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: quay.io/stolostron/grafana-dashboard-loader:2.11.0-SNAPSHOT-2024-05-14-04-35-15 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 4m + memory: 50Mi + securityContext: + privileged: false + readOnlyRootFilesystem: true + - readinessProbe: + httpGet: + path: /oauth/healthz + port: 9443 + scheme: HTTPS + timeoutSeconds: 1 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + name: grafana-proxy + ports: + - name: public + containerPort: 9443 + protocol: TCP + imagePullPolicy: IfNotPresent + volumeMounts: + - name: tls-secret + mountPath: /etc/tls/private + - mountPath: /etc/proxy/secrets + name: cookie-secret + image: quay.io/stolostron/origin-oauth-proxy:4.5 + args: + - '--provider=openshift' + - '--upstream=http://localhost:3001' + - '--https-address=:9443' + - '--cookie-secret-file=/etc/proxy/secrets/session_secret' + - '--cookie-expire=12h0m0s' + - '--cookie-refresh=8h0m0s' + - '--openshift-delegate-urls={"/": {"resource": "projects", "verb": "list"}}' + - '--tls-cert=/etc/tls/private/tls.crt' + - '--tls-key=/etc/tls/private/tls.key' + - '--openshift-service-account=grafana' + - '--pass-user-bearer-token=true' + - '--pass-access-token=true' + - '--client-id=grafana-proxy-client' + - '--client-secret=grafana-proxy-client' + - '--scope=user:full' + - '--openshift-ca=/etc/pki/tls/cert.pem' + - '--openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt' + securityContext: + privileged: false + readOnlyRootFilesystem: true + serviceAccount: grafana + imagePullSecrets: + - name: multiclusterhub-operator-pull-secret + serviceAccountName: grafana + volumes: + - emptyDir: {} + name: grafana-storage + - name: grafana-datasources + secret: + defaultMode: 420 + secretName: grafana-datasources-test + - name: grafana-config + secret: + defaultMode: 420 + secretName: grafana-config-test + - name: tls-secret + secret: + defaultMode: 420 + secretName: grafana-tls + - name: cookie-secret + secret: + defaultMode: 420 + secretName: rbac-proxy-cookie-secret diff --git a/scripts/bootstrap-kind-env.sh b/scripts/bootstrap-kind-env.sh index 206ce9883..e92607264 100755 --- a/scripts/bootstrap-kind-env.sh +++ b/scripts/bootstrap-kind-env.sh @@ -39,12 +39,22 @@ deploy_openshift_router() { kubectl apply -f ${WORKDIR}/router/ } +create_kind_cluster_managed() { + echo "Coleen Delete the KinD cluster if exists" + kind delete cluster --name $1 || true + rm -rf $HOME/.kube/kind-config-$1 + + echo "Start KinD cluster with the default cluster name - $1" + kind create cluster --kubeconfig $HOME/.kube/kind-config-$1 --name $1 --config ${WORKDIR}/kind/kind-$1.config.yaml +} + run() { create_kind_cluster hub deploy_crds deploy_templates deploy_service_ca_operator deploy_openshift_router + create_kind_cluster_managed managed } run diff --git a/test.sh b/test.sh new file mode 100755 index 000000000..b1e4db4ff --- /dev/null +++ b/test.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +cd $(dirname ${BASH_SOURCE}) + +set -e + +hub=${CLUSTER1:-hub} +c1=${CLUSTER1:-cluster1} +c2=${CLUSTER2:-cluster2} + +hubctx="kind-${hub}" +c1ctx="kind-${c1}" +c2ctx="kind-${c2}" + +kind create cluster --name "${hub}" +kind create cluster --name "${c1}" +kind create cluster --name "${c2}" + +echo "Initialize the ocm hub cluster\n" +clusteradm init --wait --context ${hubctx} +joincmd=$(clusteradm get token --context ${hubctx} | grep clusteradm) + +echo "Join 
cluster1 to hub\n"
$(echo ${joincmd} --force-internal-endpoint-lookup --wait --context ${c1ctx} | sed "s/<cluster_name>/$c1/g")

echo "Join cluster2 to hub\n"
$(echo ${joincmd} --force-internal-endpoint-lookup --wait --context ${c2ctx} | sed "s/<cluster_name>/$c2/g")

echo "Accept join of cluster1 and cluster2"
clusteradm accept --context ${hubctx} --clusters ${c1},${c2} --wait

kubectl get managedclusters --all-namespaces --context ${hubctx}
diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go
index 85a60bc37..fc461baee 100644
--- a/tests/pkg/tests/observability_endpoint_preserve_test.go
+++ b/tests/pkg/tests/observability_endpoint_preserve_test.go
@@ -6,9 +6,10 @@ package tests

 import (
 	"fmt"
-	"k8s.io/klog/v2"
 	"os"

+	"k8s.io/klog/v2"
+
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"

diff --git a/tests/pkg/utils/mco_deployments.go b/tests/pkg/utils/mco_deployments.go
index 0c698e723..f3856e448 100644
--- a/tests/pkg/utils/mco_deployments.go
+++ b/tests/pkg/utils/mco_deployments.go
@@ -6,6 +6,7 @@ package utils

 import (
 	"context"
+
 	appv1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog"
diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go
index a92b4e205..29d450402 100644
--- a/tests/pkg/utils/mco_managedcluster.go
+++ b/tests/pkg/utils/mco_managedcluster.go
@@ -7,7 +7,6 @@ package utils
 import (
 	"context"
 	"errors"
-
 	goversion "github.com/hashicorp/go-version"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -51,8 +50,10 @@ func ListManagedClusters(opt TestOptions) ([]string, error) {
 	for _, obj := range objs.Items {
 		metadata := obj.Object["metadata"].(map[string]interface{})
 		name := metadata["name"].(string)
-		labels := metadata["labels"].(map[string]interface{})
-		if labels != nil {
+		if name == "local-cluster" {
+			continue
+		}
+		if labels, ok := metadata["labels"].(map[string]interface{}); ok {
 			obsControllerStr := ""
 			if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok {
 				obsControllerStr = obsController.(string)
diff --git a/tests/pkg/utils/mco_oba.go b/tests/pkg/utils/mco_oba.go
index 43dfbdae1..9a2e92d3c 100644
--- a/tests/pkg/utils/mco_oba.go
+++ b/tests/pkg/utils/mco_oba.go
@@ -54,25 +54,6 @@ func CheckOBADeleted(opt TestOptions, namespace string) error {
 	return nil
 }

-func CheckManagedClusterAddonsStatus(opt TestOptions, namespace, status string) error {
-	dynClient := NewKubeClientDynamic(
-		opt.HubCluster.ClusterServerURL,
-		opt.KubeConfig,
-		opt.HubCluster.KubeContext)
-
-	mca, err := dynClient.Resource(NewMCOManagedClusterAddonsGVR()).
-		Namespace(namespace).
- Get(context.TODO(), "observability-controller", metav1.GetOptions{}) - if err != nil { - return err - } - if mca.Object["status"] != nil && strings.Contains(fmt.Sprint(mca.Object["status"]), status) { - return nil - } else { - return fmt.Errorf("observability-controller is disabled for managed cluster %s", namespace) - } -} - func CheckAllOBAsEnabled(opt TestOptions) error { clusters, err := ListManagedClusters(opt) if err != nil { @@ -95,29 +76,6 @@ func CheckAllOBAsEnabled(opt TestOptions) error { return nil } -func CheckAllOBADisabled(opt TestOptions) error { - clusters, err := ListManagedClusters(opt) - if err != nil { - return err - } - for _, cluster := range clusters { - // skip the check for local-cluster - if cluster == "local-cluster" { - klog.V(1).Infof("Skip OBA status for managedcluster: %v", cluster) - continue - } - err = CheckOBAStatus(opt, cluster, ManagedClusterAddOnDisabledMessage) - if err != nil { - return err - } - err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddOnDisabledMessage) - if err != nil { - return err - } - } - return nil -} - func CheckAllOBAsDeleted(opt TestOptions) error { clusters, err := ListManagedClusters(opt) if err != nil { diff --git a/tests/run-in-kind/kind/kind-managed.config.yaml b/tests/run-in-kind/kind/kind-managed.config.yaml new file mode 100644 index 000000000..9ad36aac7 --- /dev/null +++ b/tests/run-in-kind/kind/kind-managed.config.yaml @@ -0,0 +1,17 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + extraPortMappings: + - containerPort: 80 + hostPort: 83 + listenAddress: "0.0.0.0" + - containerPort: 444 + hostPort: 445 + listenAddress: "0.0.0.0" + - containerPort: 6444 + hostPort: 32808 + listenAddress: "0.0.0.0" + - containerPort: 31002 + hostPort: 31003 + listenAddress: "127.0.0.1" diff --git a/tests/run-in-kind/run-e2e-in-kind.sh b/tests/run-in-kind/run-e2e-in-kind.sh index 49d36ec29..5ab9b2621 100755 --- a/tests/run-in-kind/run-e2e-in-kind.sh +++ b/tests/run-in-kind/run-e2e-in-kind.sh @@ -14,7 +14,7 @@ export IS_KIND_ENV=true source ${WORKDIR}/env.sh create_kind_cluster() { - echo "Delete the KinD cluster if exists" + echo "Delete the KinD cluster if exists coleen" kind delete cluster --name $1 || true rm -rf $HOME/.kube/kind-config-$1 @@ -23,6 +23,7 @@ create_kind_cluster() { export KUBECONFIG=$HOME/.kube/kind-config-$1 } + deploy_service_ca_operator() { kubectl create ns openshift-config-managed kubectl apply -f ${WORKDIR}/service-ca/ @@ -51,6 +52,7 @@ run_e2e_test() { run() { create_kind_cluster hub + create_kind_cluster_managed managed deploy_crds deploy_templates deploy_service_ca_operator @@ -59,4 +61,3 @@ run() { run_e2e_test } -run From 062218aaa7398e2362ef64913d9fb5609f413106 Mon Sep 17 00:00:00 2001 From: Coleen Iona Quadros Date: Thu, 23 May 2024 11:26:18 +0200 Subject: [PATCH 08/33] remove files Signed-off-by: Coleen Iona Quadros --- cicd-scripts/.hub-kubeconfig | 0 examples/mco/e2e/v1beta1/observability.yaml-e | 49 ------ examples/mco/e2e/v1beta2/observability.yaml-e | 146 ------------------ examples/minio-tls/minio-pvc.yaml-e | 14 -- examples/minio/minio-pvc.yaml-e | 14 -- .../manifests/base/grafana/deployment.yaml-e | 145 ----------------- test.sh | 32 ---- 7 files changed, 400 deletions(-) delete mode 100644 cicd-scripts/.hub-kubeconfig delete mode 100644 examples/mco/e2e/v1beta1/observability.yaml-e delete mode 100644 examples/mco/e2e/v1beta2/observability.yaml-e delete mode 100644 examples/minio-tls/minio-pvc.yaml-e delete mode 100644 
examples/minio/minio-pvc.yaml-e delete mode 100644 operators/multiclusterobservability/manifests/base/grafana/deployment.yaml-e delete mode 100755 test.sh diff --git a/cicd-scripts/.hub-kubeconfig b/cicd-scripts/.hub-kubeconfig deleted file mode 100644 index e69de29bb..000000000 diff --git a/examples/mco/e2e/v1beta1/observability.yaml-e b/examples/mco/e2e/v1beta1/observability.yaml-e deleted file mode 100644 index cf544ad34..000000000 --- a/examples/mco/e2e/v1beta1/observability.yaml-e +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: observability.open-cluster-management.io/v1beta1 -kind: MultiClusterObservability -metadata: - name: observability - annotations: - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-11-47-23 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-06-19-22-35 -spec: - nodeSelector: - kubernetes.io/os: linux - observabilityAddonSpec: {} - retentionResolutionRaw: 5d - retentionResolution5m: 14d - retentionResolution1h: 30d - storageConfigObject: - metricObjectStorage: - key: thanos.yaml - name: thanos-object-storage - statefulSetSize: 1Gi - statefulSetStorageClass: gp2 diff --git a/examples/mco/e2e/v1beta2/observability.yaml-e b/examples/mco/e2e/v1beta2/observability.yaml-e deleted file mode 100644 index 16674cab8..000000000 --- a/examples/mco/e2e/v1beta2/observability.yaml-e +++ /dev/null @@ -1,146 +0,0 @@ -apiVersion: observability.open-cluster-management.io/v1beta2 -kind: MultiClusterObservability -metadata: - name: observability - annotations: - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-11-47-23 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 - mco-endpoint_monitoring_operator-image: 
quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-06-19-22-35 -spec: - advanced: - retentionConfig: - blockDuration: 3h - cleanupInterval: 6m - deleteDelay: 50h - retentionInLocal: 5d - retentionResolution1h: 31d - retentionResolution5m: 15d - retentionResolutionRaw: 6d - observatoriumAPI: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 - queryFrontend: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 - query: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 - serviceAccountAnnotations: - test.com/role-arn: 's3_role' - compact: - resources: - limits: - cpu: 1 - memory: 2Gi - serviceAccountAnnotations: - test.com/role-arn: 's3_role' - receive: - resources: - limits: - cpu: 1 - memory: 4Gi - replicas: 3 - serviceAccountAnnotations: - test.com/role-arn: 's3_role' - rule: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 1 - serviceAccountAnnotations: - test.com/role-arn: 's3_role' - store: - resources: - limits: - cpu: 1 - memory: 2Gi - replicas: 3 - serviceAccountAnnotations: - test.com/role-arn: 's3_role' - storeMemcached: - resources: - limits: - cpu: 1 - memory: 2Gi - replicas: 2 - memoryLimitMb: 2048 - maxItemSize: 2m - connectionLimit: 2048 - queryFrontendMemcached: - resources: - limits: - cpu: 1 - memory: 2Gi - replicas: 3 - memoryLimitMb: 2048 - maxItemSize: 2m - connectionLimit: 2048 - grafana: - replicas: 3 - resources: - limits: - cpu: 1 - memory: 1Gi - alertmanager: - replicas: 2 - resources: - limits: - cpu: 100m - memory: 400Mi - rbacQueryProxy: - replicas: 3 - resources: - limits: - cpu: 50m - memory: 200Mi - nodeSelector: - kubernetes.io/os: linux - observabilityAddonSpec: - enableMetrics: true - interval: 30 - resources: - limits: - cpu: 200m - memory: 700Mi - requests: - cpu: 10m - memory: 100Mi - storageConfig: - alertmanagerStorageSize: 1Gi - compactStorageSize: 1Gi - metricObjectStorage: - key: thanos.yaml - name: thanos-object-storage - receiveStorageSize: 1Gi - ruleStorageSize: 1Gi - storageClass: gp2 - storeStorageSize: 1Gi diff --git a/examples/minio-tls/minio-pvc.yaml-e b/examples/minio-tls/minio-pvc.yaml-e deleted file mode 100644 index 20023982d..000000000 --- a/examples/minio-tls/minio-pvc.yaml-e +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - 
app.kubernetes.io/name: minio - name: minio - namespace: open-cluster-management-observability -spec: - storageClassName: standard - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "1Gi" diff --git a/examples/minio/minio-pvc.yaml-e b/examples/minio/minio-pvc.yaml-e deleted file mode 100644 index 20023982d..000000000 --- a/examples/minio/minio-pvc.yaml-e +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - app.kubernetes.io/name: minio - name: minio - namespace: open-cluster-management-observability -spec: - storageClassName: standard - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "1Gi" diff --git a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml-e b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml-e deleted file mode 100644 index e98490f25..000000000 --- a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml-e +++ /dev/null @@ -1,145 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: multicluster-observability-grafana-test - name: grafana-test - namespace: open-cluster-management-observability -spec: - replicas: 1 - selector: - matchLabels: - app: multicluster-observability-grafana-test - template: - metadata: - labels: - app: multicluster-observability-grafana-test - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 70 - podAffinityTerm: - topologyKey: topology.kubernetes.io/zone - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - multicluster-observability-grafana - - weight: 30 - podAffinityTerm: - topologyKey: kubernetes.io/hostname - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - multicluster-observability-grafana - containers: - - args: - - -config=/etc/grafana/grafana.ini - image: quay.io/stolostron/grafana:2.11.0-SNAPSHOT-2024-05-14-04-35-15 - imagePullPolicy: IfNotPresent - name: grafana-test - ports: - - containerPort: 3001 - name: http - protocol: TCP - resources: - limits: - cpu: 500m - memory: 1Gi - requests: - cpu: 4m - memory: 100Mi - volumeMounts: - - mountPath: /var/lib/grafana - name: grafana-storage - - mountPath: /etc/grafana/provisioning/datasources - name: grafana-datasources - - mountPath: /etc/grafana - name: grafana-config - securityContext: - privileged: false - readOnlyRootFilesystem: true - - name: grafana-dashboard-loader - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/stolostron/grafana-dashboard-loader:2.11.0-SNAPSHOT-2024-05-14-04-35-15 - imagePullPolicy: IfNotPresent - resources: - requests: - cpu: 4m - memory: 50Mi - securityContext: - privileged: false - readOnlyRootFilesystem: true - - readinessProbe: - httpGet: - path: /oauth/healthz - port: 9443 - scheme: HTTPS - timeoutSeconds: 1 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - name: grafana-proxy - ports: - - name: public - containerPort: 9443 - protocol: TCP - imagePullPolicy: IfNotPresent - volumeMounts: - - name: tls-secret - mountPath: /etc/tls/private - - mountPath: /etc/proxy/secrets - name: cookie-secret - image: quay.io/stolostron/origin-oauth-proxy:4.5 - args: - - '--provider=openshift' - - '--upstream=http://localhost:3001' - - '--https-address=:9443' - - '--cookie-secret-file=/etc/proxy/secrets/session_secret' - - '--cookie-expire=12h0m0s' - - '--cookie-refresh=8h0m0s' - - '--openshift-delegate-urls={"/": {"resource": "projects", 
"verb": "list"}}' - - '--tls-cert=/etc/tls/private/tls.crt' - - '--tls-key=/etc/tls/private/tls.key' - - '--openshift-service-account=grafana' - - '--pass-user-bearer-token=true' - - '--pass-access-token=true' - - '--client-id=grafana-proxy-client' - - '--client-secret=grafana-proxy-client' - - '--scope=user:full' - - '--openshift-ca=/etc/pki/tls/cert.pem' - - '--openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt' - securityContext: - privileged: false - readOnlyRootFilesystem: true - serviceAccount: grafana - imagePullSecrets: - - name: multiclusterhub-operator-pull-secret - serviceAccountName: grafana - volumes: - - emptyDir: {} - name: grafana-storage - - name: grafana-datasources - secret: - defaultMode: 420 - secretName: grafana-datasources-test - - name: grafana-config - secret: - defaultMode: 420 - secretName: grafana-config-test - - name: tls-secret - secret: - defaultMode: 420 - secretName: grafana-tls - - name: cookie-secret - secret: - defaultMode: 420 - secretName: rbac-proxy-cookie-secret diff --git a/test.sh b/test.sh deleted file mode 100755 index b1e4db4ff..000000000 --- a/test.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -cd $(dirname ${BASH_SOURCE}) - -set -e - -hub=${CLUSTER1:-hub} -c1=${CLUSTER1:-cluster1} -c2=${CLUSTER2:-cluster2} - -hubctx="kind-${hub}" -c1ctx="kind-${c1}" -c2ctx="kind-${c2}" - -kind create cluster --name "${hub}" -kind create cluster --name "${c1}" -kind create cluster --name "${c2}" - -echo "Initialize the ocm hub cluster\n" -clusteradm init --wait --context ${hubctx} -joincmd=$(clusteradm get token --context ${hubctx} | grep clusteradm) - -echo "Join cluster1 to hub\n" -$(echo ${joincmd} --force-internal-endpoint-lookup --wait --context ${c1ctx} | sed "s//$c1/g") - -echo "Join cluster2 to hub\n" -$(echo ${joincmd} --force-internal-endpoint-lookup --wait --context ${c2ctx} | sed "s//$c2/g") - -echo "Accept join of cluster1 and cluster2" -clusteradm accept --context ${hubctx} --clusters ${c1},${c2} --wait - -kubectl get managedclusters --all-namespaces --context ${hubctx} From 4cda1a7a150d1ae8fc684431899f0932879c3a4f Mon Sep 17 00:00:00 2001 From: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> Date: Tue, 30 Apr 2024 15:47:50 +0200 Subject: [PATCH 09/33] [ACM-10812]: fix addon status not reported in hub (#1420) * init version Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * fix Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * env test Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * change withReload naming Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --------- Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --- .../observabilityaddon_controller.go | 22 +- ...bilityaddon_controller_integration_test.go | 7 +- .../observabilityaddon_controller_test.go | 12 +- .../controllers/status/status_controller.go | 136 +++++-- .../status_controller_integration_test.go | 201 ++++++++++ .../status/status_controller_test.go | 356 ++++++++++++++---- operators/endpointmetrics/main.go | 18 +- operators/endpointmetrics/pkg/util/client.go | 118 +++--- .../endpointmetrics/pkg/util/client_test.go | 39 -- operators/endpointmetrics/pkg/util/lease.go | 7 +- 10 files changed, 675 insertions(+), 241 deletions(-) create mode 100644 operators/endpointmetrics/controllers/status/status_controller_integration_test.go delete mode 100644 operators/endpointmetrics/pkg/util/client_test.go diff --git 
a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go index 571ae5700..dfd6b6718 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go @@ -75,7 +75,7 @@ var ( type ObservabilityAddonReconciler struct { Client client.Client Scheme *runtime.Scheme - HubClient client.Client + HubClient *util.ReloadableHubClient } // +kubebuilder:rbac:groups=observability.open-cluster-management.io.open-cluster-management.io,resources=observabilityaddons,verbs=get;list;watch;create;update;patch;delete @@ -113,18 +113,22 @@ func (r *ObservabilityAddonReconciler) Reconcile(ctx context.Context, req ctrl.R } // Fetch the ObservabilityAddon instance in hub cluster - err := r.HubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: hubNamespace}, hubObsAddon) - if err != nil { - hubClient, obsAddon, err := util.RenewAndRetry(ctx, r.Scheme) - if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to get observabilityaddon: %w", err) + fetchAddon := func() error { + return r.HubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: hubNamespace}, hubObsAddon) + } + if err := fetchAddon(); err != nil { + if r.HubClient, err = r.HubClient.Reload(); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reload the hub client: %w", err) + } + + // Retry the operation once with the reloaded client + if err := fetchAddon(); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get ObservabilityAddon in hub cluster: %w", err) } - r.HubClient = hubClient - hubObsAddon = obsAddon } // Fetch the ObservabilityAddon instance in local cluster - err = r.Client.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: namespace}, obsAddon) + err := r.Client.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: namespace}, obsAddon) if err != nil { if !errors.IsNotFound(err) { return ctrl.Result{}, fmt.Errorf("failed to get observabilityaddon: %w", err) diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_integration_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_integration_test.go index ec238df63..81753ed38 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_integration_test.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_integration_test.go @@ -15,6 +15,7 @@ import ( hyperv1 "github.com/openshift/hypershift/api/v1alpha1" promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/hypershift" + "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/util" oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" @@ -67,9 +68,13 @@ func TestIntegrationReconcileHypershift(t *testing.T) { }) assert.NoError(t, err) + hubClientWithReload, err := util.NewReloadableHubClientWithReloadFunc(func() (client.Client, error) { + return k8sClient, nil + }) + assert.NoError(t, err) reconciler := ObservabilityAddonReconciler{ Client: k8sClient, - 
HubClient: k8sClient, + HubClient: hubClientWithReload, } err = reconciler.SetupWithManager(mgr) diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_test.go index 9a790023e..39a2549af 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_test.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_test.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/openshift" @@ -178,12 +179,17 @@ alertmanager-router-ca: | } hubClient := fake.NewClientBuilder().WithRuntimeObjects(hubObjs...).Build() - util.SetHubClient(hubClient) c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + hubClientWithReload, err := util.NewReloadableHubClientWithReloadFunc(func() (client.Client, error) { + return hubClient, nil + }) + if err != nil { + t.Fatalf("Failed to create hub client with reload: %v", err) + } r := &ObservabilityAddonReconciler{ Client: c, - HubClient: hubClient, + HubClient: hubClientWithReload, } // test error in reconcile if missing obervabilityaddon @@ -194,7 +200,7 @@ alertmanager-router-ca: | }, } ctx := context.TODO() - _, err := r.Reconcile(ctx, req) + _, err = r.Reconcile(ctx, req) if err == nil { t.Fatalf("reconcile: miss the error for missing obervabilityaddon") } diff --git a/operators/endpointmetrics/controllers/status/status_controller.go b/operators/endpointmetrics/controllers/status/status_controller.go index 1c0e81d23..dffdd83d5 100644 --- a/operators/endpointmetrics/controllers/status/status_controller.go +++ b/operators/endpointmetrics/controllers/status/status_controller.go @@ -6,39 +6,33 @@ package status import ( "context" - "os" + "fmt" + "net" "reflect" + "time" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" + "github.com/go-logr/logr" "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/util" oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" ) -var ( - log = ctrl.Log.WithName("controllers").WithName("Status") -) - -const ( - obAddonName = "observability-addon" -) - -var ( - namespace = os.Getenv("WATCH_NAMESPACE") - hubNamespace = os.Getenv("HUB_NAMESPACE") -) - // StatusReconciler reconciles status object. 
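
With the environment-derived package globals gone, the reconciler's collaborators are injected through the struct fields defined below, which is what lets the tests in this patch run against arbitrary namespaces. A minimal sketch of the expected wiring, mirroring the main.go hunk later in this patch (the env var names and addon name come from the surrounding diff; `mgr`, `setupLog`, and `hubClientWithReload` are assumed to be already constructed):

```go
r := &status.StatusReconciler{
	Client:       mgr.GetClient(),
	HubClient:    hubClientWithReload, // *util.ReloadableHubClient, introduced in pkg/util/client.go
	Namespace:    os.Getenv("WATCH_NAMESPACE"),
	HubNamespace: os.Getenv("HUB_NAMESPACE"),
	ObsAddonName: "observability-addon",
	Logger:       ctrl.Log.WithName("controllers").WithName("Status"),
}
if err := r.SetupWithManager(mgr); err != nil {
	setupLog.Error(err, "unable to create controller", "controller", "Status")
	os.Exit(1)
}
```
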
type StatusReconciler struct { - Client client.Client - Scheme *runtime.Scheme - HubClient client.Client + Client client.Client + HubNamespace string + Namespace string + HubClient *util.ReloadableHubClient + ObsAddonName string + Logger logr.Logger } // Reconcile reads that state of the cluster for a ObservabilityAddon object and makes changes based on the state read @@ -46,34 +40,57 @@ type StatusReconciler struct { // The Controller will requeue the Request to be processed again if the returned error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. func (r *StatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) - log.Info("Reconciling") + r.Logger.WithValues("Request", req.String()).Info("Reconciling") // Fetch the ObservabilityAddon instance in hub cluster hubObsAddon := &oav1beta1.ObservabilityAddon{} - err := r.HubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: hubNamespace}, hubObsAddon) + err := r.HubClient.Get(ctx, types.NamespacedName{Name: r.ObsAddonName, Namespace: r.HubNamespace}, hubObsAddon) if err != nil { - hubClient, obsAddon, err := util.RenewAndRetry(ctx, r.Scheme) - if err != nil { - return ctrl.Result{}, err + if isAuthOrConnectionErr(err) { + // Try reloading the kubeconfig for the hub cluster + var reloadErr error + if r.HubClient, reloadErr = r.HubClient.Reload(); reloadErr != nil { + return ctrl.Result{}, fmt.Errorf("failed to reload the hub client: %w", reloadErr) + } + r.Logger.Info("Failed to get ObservabilityAddon in hub cluster, reloaded hub, requeue with delay", "error", err) + return ctrl.Result{Requeue: true}, nil + } + + if isTransientErr(err) { + r.Logger.Info("Failed to get ObservabilityAddon in hub cluster, requeue with delay", "error", err) + return requeueWithOptionalDelay(err), nil } - r.HubClient = hubClient - hubObsAddon = obsAddon - } - // Fetch the ObservabilityAddon instance in local cluster - obsAddon := &oav1beta1.ObservabilityAddon{} - err = r.Client.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: namespace}, obsAddon) - if err != nil { - log.Error(err, "Failed to get observabilityaddon", "namespace", namespace) return ctrl.Result{}, err } - hubObsAddon.Status = obsAddon.Status + // Retry on conflict as operation happens in other cluster + // on a shared resource that can be updated by multiple controllers. 
+ retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + // Fetch the ObservabilityAddon instance in local cluster + obsAddon := &oav1beta1.ObservabilityAddon{} + if err != r.Client.Get(ctx, types.NamespacedName{Name: r.ObsAddonName, Namespace: r.Namespace}, obsAddon) { + return err + } - err = r.HubClient.Status().Update(ctx, hubObsAddon) - if err != nil { - log.Error(err, "Failed to update status for observabilityaddon in hub cluster", "namespace", hubNamespace) + // Only update the status in hub cluster if needed + if reflect.DeepEqual(hubObsAddon.Status, obsAddon.Status) { + return nil + } + + updatedAddon := hubObsAddon.DeepCopy() + updatedAddon.Status = obsAddon.Status + + // Update the status in hub cluster + return r.HubClient.Status().Update(ctx, updatedAddon) + }) + if retryErr != nil { + if isTransientErr(retryErr) || errors.IsConflict(retryErr) { + r.Logger.Info("Retryable error while updating status, request will be retried.", "error", retryErr) + return requeueWithOptionalDelay(retryErr), nil + } + + return ctrl.Result{}, fmt.Errorf("failed to update status in hub cluster: %w", retryErr) } return ctrl.Result{}, nil @@ -81,16 +98,12 @@ func (r *StatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // SetupWithManager sets up the controller with the Manager. func (r *StatusReconciler) SetupWithManager(mgr ctrl.Manager) error { - if os.Getenv("NAMESPACE") != "" { - namespace = os.Getenv("NAMESPACE") - } - pred := predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { return false }, UpdateFunc: func(e event.UpdateEvent) bool { - if e.ObjectNew.GetNamespace() == namespace && + if e.ObjectNew.GetNamespace() == r.Namespace && !reflect.DeepEqual(e.ObjectNew.(*oav1beta1.ObservabilityAddon).Status, e.ObjectOld.(*oav1beta1.ObservabilityAddon).Status) { return true @@ -106,3 +119,44 @@ func (r *StatusReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&oav1beta1.ObservabilityAddon{}, builder.WithPredicates(pred)). 
Complete(r) } + +// isTransientErr checks if the error is a transient error +// This suggests that a retry (without any change) might be successful +func isTransientErr(err error) bool { + if _, ok := err.(net.Error); ok { + return true + } + + if statusErr, ok := err.(*errors.StatusError); ok { + code := statusErr.Status().Code + if code >= 500 && code < 600 && code != 501 { + return true + } + } + + return errors.IsTimeout(err) || errors.IsServerTimeout(err) || errors.IsTooManyRequests(err) +} + +// isAuthOrConnectionErr checks if the error is an authentication error or a connection error +// This suggests an issue with the client configuration and a reload might be needed +func isAuthOrConnectionErr(err error) bool { + if errors.IsUnauthorized(err) || errors.IsForbidden(err) || errors.IsTimeout(err) { + return true + } + + if _, ok := err.(net.Error); ok { + return true + } + + return false +} + +// requeueWithOptionalDelay requeues the request with a delay if suggested by the error +// Otherwise, it requeues the request without a delay +func requeueWithOptionalDelay(err error) ctrl.Result { + if delay, ok := errors.SuggestsClientDelay(err); ok { + return ctrl.Result{RequeueAfter: time.Duration(delay) * time.Second} + } + + return ctrl.Result{Requeue: true} +} diff --git a/operators/endpointmetrics/controllers/status/status_controller_integration_test.go b/operators/endpointmetrics/controllers/status/status_controller_integration_test.go new file mode 100644 index 000000000..66f252583 --- /dev/null +++ b/operators/endpointmetrics/controllers/status/status_controller_integration_test.go @@ -0,0 +1,201 @@ +// Copyright (c) Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project +// Licensed under the Apache License 2.0 + +//go:build integration + +package status + +import ( + "context" + "os" + "testing" + "time" + + "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/util" + oashared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + kubescheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestIntegrationReconcileStatus(t *testing.T) { + spokeNamespace := "test-namespace" + hubNamespace := "hub-namespace" + obsAddonName := "observability-addon" + + testEnv, k8sClient := setupTestEnv(t) + defer testEnv.Stop() + + spokeObsAddon := newObservabilityAddon(obsAddonName, spokeNamespace) + resourcesDeps := []client.Object{ + makeNamespace(spokeNamespace), + spokeObsAddon, + } + if err := createResources(k8sClient, resourcesDeps...); err != nil { + t.Fatalf("Failed to create resources: %v", err) + } + + hubTestEnv, hubK8sClient := setupTestEnv(t) + defer hubTestEnv.Stop() + + resourcesDeps = []client.Object{ + makeNamespace(hubNamespace), + newObservabilityAddon(obsAddonName, 
hubNamespace), + } + if err := createResources(hubK8sClient, resourcesDeps...); err != nil { + t.Fatalf("Failed to create resources: %v", err) + } + + mgr, err := ctrl.NewManager(testEnv.Config, ctrl.Options{ + Scheme: k8sClient.Scheme(), + }) + assert.NoError(t, err) + + hubClientWithReload, err := util.NewReloadableHubClientWithReloadFunc(func() (client.Client, error) { + return hubK8sClient, nil + }) + assert.NoError(t, err) + reconciler := StatusReconciler{ + Client: k8sClient, + Namespace: spokeNamespace, + HubNamespace: hubNamespace, + ObsAddonName: obsAddonName, + Logger: ctrl.Log.WithName("controllers").WithName("Status"), + HubClient: hubClientWithReload, + } + + err = reconciler.SetupWithManager(mgr) + assert.NoError(t, err) + + go func() { + err = mgr.Start(ctrl.SetupSignalHandler()) + assert.NoError(t, err) + }() + + go func() { + // Update spoke addon status concurrently to trigger the reconcile loop. + addCondition(spokeObsAddon, "Deployed", metav1.ConditionTrue) + err := wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) { + spokeObsAddon.Status.Conditions[0].LastTransitionTime = metav1.Time{ + Time: time.Now(), + } + err := k8sClient.Status().Update(context.Background(), spokeObsAddon) + if err != nil { + return false, err + } + + return true, nil + }) + assert.NoError(t, err) + }() + + // Hub addon status should be updated + err = wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) { + hubObsAddon := &oav1beta1.ObservabilityAddon{} + err := hubK8sClient.Get(context.Background(), types.NamespacedName{Name: obsAddonName, Namespace: hubNamespace}, hubObsAddon) + if err != nil { + return false, err + } + + return len(hubObsAddon.Status.Conditions) > 0, nil + }) + + assert.NoError(t, err) +} + +// setupTestEnv starts the test environment (etcd and kube api-server). +func setupTestEnv(t *testing.T) (*envtest.Environment, client.Client) { + scheme := runtime.NewScheme() + kubescheme.AddToScheme(scheme) + oav1beta1.AddToScheme(scheme) + + addonCrdYamlData, err := os.ReadFile("../../config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml") + if err != nil { + t.Fatalf("Failed to read CRD file: %v", err) + } + + dec := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + var crd apiextensionsv1.CustomResourceDefinition + _, _, err = dec.Decode(addonCrdYamlData, nil, &crd) + if err != nil { + t.Fatalf("Failed to decode CRD: %v", err) + } + + testEnv := &envtest.Environment{ + CRDs: []*apiextensionsv1.CustomResourceDefinition{&crd}, + } + + cfg, err := testEnv.Start() + if err != nil { + t.Fatal(err) + } + + k8sClient, err := client.New(cfg, client.Options{Scheme: scheme}) + if err != nil { + t.Fatal(err) + } + + opts := zap.Options{ + Development: true, + } + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + return testEnv, k8sClient +} + +func makeNamespace(name string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } +} + +// createResources creates the given resources in the cluster. 
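
The assertions above rely on the deprecated `wait.Poll`; if this test is ever migrated, the context-aware variant is a near drop-in replacement. A sketch under that assumption (requires k8s.io/apimachinery v0.27+; the other names are taken from the test above):

```go
err = wait.PollUntilContextTimeout(context.Background(), time.Second, 10*time.Second, true,
	func(ctx context.Context) (bool, error) {
		hubObsAddon := &oav1beta1.ObservabilityAddon{}
		if err := hubK8sClient.Get(ctx, types.NamespacedName{Name: obsAddonName, Namespace: hubNamespace}, hubObsAddon); err != nil {
			return false, err
		}
		return len(hubObsAddon.Status.Conditions) > 0, nil
	})
```
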
+func createResources(client client.Client, resources ...client.Object) error { + for _, resource := range resources { + if err := client.Create(context.Background(), resource); err != nil { + return err + } + } + return nil +} + +func newObservabilityAddon(name string, ns string) *oav1beta1.ObservabilityAddon { + return &oav1beta1.ObservabilityAddon{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: oashared.ObservabilityAddonSpec{ + EnableMetrics: true, + Interval: 60, + }, + } +} + +func addCondition(oba *oav1beta1.ObservabilityAddon, statusType string, status metav1.ConditionStatus) { + condition := oav1beta1.StatusCondition{ + Type: statusType, + Status: status, + Reason: "DummyReason", + Message: "DummyMessage", + LastTransitionTime: metav1.Time{ + Time: time.Now(), + }, + } + oba.Status.Conditions = append(oba.Status.Conditions, condition) +} diff --git a/operators/endpointmetrics/controllers/status/status_controller_test.go b/operators/endpointmetrics/controllers/status/status_controller_test.go index 9dbde648d..0d689c284 100644 --- a/operators/endpointmetrics/controllers/status/status_controller_test.go +++ b/operators/endpointmetrics/controllers/status/status_controller_test.go @@ -2,20 +2,27 @@ // Copyright Contributors to the Open Cluster Management project // Licensed under the Apache License 2.0 -package status +package status_test import ( "context" - "os" + "fmt" + "net" + "reflect" "testing" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "github.com/go-logr/logr" + "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/controllers/status" "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/util" oashared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" @@ -23,98 +30,317 @@ import ( ) const ( - name = "observability-addon" - testNamespace = "test-ns" - testHubNamspace = "test-hub-ns" + name = "observability-addon" + testNamespace = "test-ns" + testHubNamespace = "test-hub-ns" + obAddonName = "observability-addon" ) -func newObservabilityAddon(name string, ns string) *oav1beta1.ObservabilityAddon { - return &oav1beta1.ObservabilityAddon{ - ObjectMeta: v1.ObjectMeta{ - Name: name, - Namespace: ns, +func TestStatusController_NominalCase(t *testing.T) { + spokeOba := newObservabilityAddon(name, testNamespace) + c := newClient(spokeOba) + + hubOba := newObservabilityAddon(name, testHubNamespace) + hubOba.Spec.Interval = 12341 // add variation in the spec, not status + custumHubClient := newClientWithUpdateError(newClient(hubOba), nil, nil) + r := newStatusReconciler(c, func() (client.Client, error) { return custumHubClient, nil }) + + // no status difference triggers no update + resp, err := r.Reconcile(context.Background(), newRequest()) + if err != nil { + t.Fatalf("Failed to reconcile: %v", err) + } + if !reflect.DeepEqual(resp, ctrl.Result{}) { + t.Fatalf("Expected no requeue") + } + if custumHubClient.UpdateCallsCount() > 0 { + t.Fatalf("Expected no 
update") + } + + // update status in spoke + addCondition(spokeOba, "Deployed", metav1.ConditionTrue) + err = c.Update(context.Background(), spokeOba) + if err != nil { + t.Fatalf("Failed to update status in spoke: %v", err) + } + + // status difference should trigger update in hub + resp, err = r.Reconcile(context.Background(), newRequest()) + if err != nil { + t.Fatalf("Failed to reconcile: %v", err) + } + if !reflect.DeepEqual(resp, ctrl.Result{}) { + t.Fatalf("Expected no requeue") + } + if custumHubClient.UpdateCallsCount() != 1 { + t.Fatalf("Expected update") + } + + // check status in hub + hubObsAddon := &oav1beta1.ObservabilityAddon{} + err = custumHubClient.Get(context.Background(), types.NamespacedName{Name: obAddonName, Namespace: testHubNamespace}, hubObsAddon) + if err != nil { + t.Fatalf("Failed to get oba in hub: %v", err) + } + if !reflect.DeepEqual(hubObsAddon.Status.Conditions, spokeOba.Status.Conditions) { + t.Fatalf("Status not updated in hub: %v", hubObsAddon.Status) + } +} + +func TestStatusController_UpdateHubAddonFailures(t *testing.T) { + spokeOba := newObservabilityAddon(name, testNamespace) + addCondition(spokeOba, "Deployed", metav1.ConditionTrue) // add status to trigger update + c := newClient(spokeOba) + + hubOba := newObservabilityAddon(name, testHubNamespace) + var updateErr error + hubClientWithConflict := newClientWithUpdateError(newClient(hubOba), updateErr, nil) + r := newStatusReconciler(c, func() (client.Client, error) { return hubClientWithConflict, nil }) + + testCases := map[string]struct { + updateErr error + reconcileErr error + requeue bool + requeueAfter bool + requeueAfterVal int + updateCallsMin int + updateCallsMax int + }{ + "Conflict": { + updateErr: errors.NewConflict(schema.GroupResource{Group: oav1beta1.GroupVersion.Group, Resource: "FakeResource"}, name, fmt.Errorf("fake conflict")), + requeue: true, + updateCallsMin: 1, }, - Spec: oashared.ObservabilityAddonSpec{ - EnableMetrics: true, - Interval: 60, + "Server unavailable": { + updateErr: errors.NewServiceUnavailable("service unavailable"), + requeue: true, + updateCallsMax: 1, + }, + "internal error": { + updateErr: errors.NewInternalError(fmt.Errorf("internal error")), + // reconcileErr: errors.NewInternalError(fmt.Errorf("fake internal error")), + updateCallsMax: 1, + requeue: true, + }, + "Permanent error": { + updateErr: errors.NewBadRequest("bad request"), + reconcileErr: errors.NewBadRequest("bad request"), + updateCallsMax: 1, + }, + "Too many requests": { + updateErr: errors.NewTooManyRequests("too many requests", 10), + requeueAfter: true, + requeueAfterVal: 10, + updateCallsMax: 1, + }, + "Network error": { + updateErr: &net.DNSError{ + Err: "network error", + }, + requeue: true, + updateCallsMax: 1, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + hubClientWithConflict.UpdateError = tc.updateErr + hubClientWithConflict.Reset() + resp, err := r.Reconcile(context.Background(), newRequest()) + if (tc.reconcileErr != nil && err == nil) || (tc.reconcileErr == nil && err != nil) { + t.Fatalf("Invalid reconcile error: got %v, expected %v", err, tc.reconcileErr) + } + if tc.requeue != resp.Requeue { + t.Fatalf("Invalid requeue: got %v, expected %v", resp.Requeue, tc.requeue) + } + if tc.requeueAfter != (resp.RequeueAfter > 0) { + t.Fatalf("Invalid requeue after: got %v, expected %v", resp.RequeueAfter > 0, tc.requeueAfter) + } + if tc.requeueAfterVal > 0 && int(resp.RequeueAfter.Seconds()) != tc.requeueAfterVal { + t.Fatalf("Invalid requeue 
after value: got %v, expected %v", int(resp.RequeueAfter.Seconds()), tc.requeueAfterVal) + } + if tc.updateCallsMin > 0 && hubClientWithConflict.UpdateCallsCount() < tc.updateCallsMin { + t.Fatalf("Expected update retry min %d times, got %d", tc.updateCallsMin, hubClientWithConflict.UpdateCallsCount()) + } + if tc.updateCallsMax > 0 && hubClientWithConflict.UpdateCallsCount() > tc.updateCallsMax { + t.Fatalf("Expected update retry at most %d times, got %d", tc.updateCallsMax, hubClientWithConflict.UpdateCallsCount()) + } + }) + } +} + +func TestStatusController_GetHubAddonFailures(t *testing.T) { + spokeOba := newObservabilityAddon(name, testNamespace) + addCondition(spokeOba, "Deployed", metav1.ConditionTrue) // add status to trigger update + c := newClient(spokeOba) + + hubOba := newObservabilityAddon(name, testHubNamespace) + hubClientWithConflict := newClientWithUpdateError(newClient(hubOba), nil, nil) + var reloadCount int + r := newStatusReconciler(c, func() (client.Client, error) { + reloadCount++ + return hubClientWithConflict, nil + }) + + testCases := map[string]struct { + getErr error + reconcileErr error + requeue bool + requeueAfter bool + requeueAfterVal int + reloadCount int + }{ + "Unauthorized": { + getErr: errors.NewUnauthorized("unauthorized"), + requeue: true, + reloadCount: 1, + }, + "Permanent error": { + getErr: errors.NewBadRequest("bad request"), + reconcileErr: errors.NewBadRequest("bad request"), }, + "Servers unavailable": { + getErr: errors.NewServiceUnavailable("service unavailable"), + requeue: true, + }, + "Too many requests": { + getErr: errors.NewTooManyRequests("too many requests", 10), + requeueAfter: true, + requeueAfterVal: 10, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + hubClientWithConflict.GetError = tc.getErr + reloadCount = 0 + // hubClientWithConflict.Reset() + resp, err := r.Reconcile(context.Background(), newRequest()) + if (tc.reconcileErr != nil && err == nil) || (tc.reconcileErr == nil && err != nil) { + t.Fatalf("Invalid reconcile error: got %v, expected %v", err, tc.reconcileErr) + } + if tc.requeue != resp.Requeue { + t.Fatalf("Invalid requeue: got %v, expected %v", resp.Requeue, tc.requeue) + } + if tc.requeueAfter != (resp.RequeueAfter > 0) { + t.Fatalf("Invalid requeue after: got %v, expected %v", resp.RequeueAfter > 0, tc.requeueAfter) + } + if tc.requeueAfterVal > 0 && int(resp.RequeueAfter.Seconds()) != tc.requeueAfterVal { + t.Fatalf("Invalid requeue after value: got %v, expected %v", int(resp.RequeueAfter.Seconds()), tc.requeueAfterVal) + } + if tc.reloadCount != reloadCount { + t.Fatalf("Expected reload %d times, got %d", tc.reloadCount, reloadCount) + } + }) } } -func init() { - os.Setenv("UNIT_TEST", "true") +func newClient(objs ...runtime.Object) client.Client { s := scheme.Scheme addonv1alpha1.AddToScheme(s) oav1beta1.AddToScheme(s) - namespace = testNamespace - hubNamespace = testHubNamspace + return fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objs...).Build() } -func TestStatusController(t *testing.T) { - - hubClient := fake.NewClientBuilder().Build() - util.SetHubClient(hubClient) - c := fake.NewClientBuilder().Build() +// TestClient wraps a client.Client to customize operations for testing +type TestClient struct { + client.Client + UpdateError error + GetError error + updateCallsCount int + statusWriter *TestStatusWriter +} - r := &StatusReconciler{ - Client: c, - HubClient: hubClient, +func newClientWithUpdateError(c client.Client, updateError, getError error) 
*TestClient { + ret := &TestClient{ + Client: c, + UpdateError: updateError, + GetError: getError, } + ret.statusWriter = &TestStatusWriter{SubResourceWriter: c.Status(), updateError: &ret.UpdateError, callsCount: &ret.updateCallsCount} + return ret +} - // test error in reconcile if missing obervabilityaddon - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "install", - Namespace: testNamespace, - }, - } - ctx := context.TODO() - _, err := r.Reconcile(ctx, req) - if err == nil { - t.Fatalf("reconcile: miss the error for missing obervabilityaddon") +func (c *TestClient) Status() client.StatusWriter { + return c.statusWriter +} + +func (c *TestClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if c.GetError != nil { + return c.GetError } + return c.Client.Get(ctx, key, obj) +} - // test status in local pushed to hub - err = hubClient.Create(ctx, newObservabilityAddon(name, testHubNamspace)) - if err != nil { - t.Fatalf("failed to create hub oba to install: (%v)", err) +func (c *TestClient) UpdateCallsCount() int { + return c.updateCallsCount +} + +func (c *TestClient) Reset() { + c.updateCallsCount = 0 +} + +type TestStatusWriter struct { + client.SubResourceWriter + updateError *error + callsCount *int +} + +func (f *TestStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + *f.callsCount++ + + if *f.updateError != nil { + return *f.updateError } - oba := newObservabilityAddon(name, testNamespace) - oba.Status = oav1beta1.ObservabilityAddonStatus{ - Conditions: []oav1beta1.StatusCondition{ - { - Type: "Deployed", - Status: metav1.ConditionTrue, - Reason: "Deployed", - Message: "Metrics collector deployed", - }, + return f.SubResourceWriter.Update(ctx, obj, opts...) 
+} + +func newObservabilityAddon(name string, ns string) *oav1beta1.ObservabilityAddon { + return &oav1beta1.ObservabilityAddon{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: oashared.ObservabilityAddonSpec{ + EnableMetrics: true, + Interval: 60, }, } - err = c.Create(ctx, oba) - if err != nil { - t.Fatalf("failed to create oba to install: (%v)", err) +} + +func addCondition(oba *oav1beta1.ObservabilityAddon, statusType string, status metav1.ConditionStatus) { + condition := oav1beta1.StatusCondition{ + Type: statusType, + Status: status, + Reason: "DummyReason", + Message: "DummyMessage", } - req = ctrl.Request{ + oba.Status.Conditions = append(oba.Status.Conditions, condition) +} + +func newRequest() ctrl.Request { + return ctrl.Request{ NamespacedName: types.NamespacedName{ Name: "install", Namespace: testNamespace, }, } - _, err = r.Reconcile(ctx, req) - if err != nil { - t.Fatalf("Failed to reconcile: (%v)", err) - } - hubObsAddon := &oav1beta1.ObservabilityAddon{} - err = hubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: testHubNamspace}, hubObsAddon) +} + +func newStatusReconciler(c client.Client, hubReload func() (client.Client, error)) *status.StatusReconciler { + hc, err := util.NewReloadableHubClientWithReloadFunc(hubReload) if err != nil { - t.Fatalf("Failed to get oba in hub: (%v)", err) + panic(err) } - if hubObsAddon.Status.Conditions == nil || len(hubObsAddon.Status.Conditions) != 1 { - t.Fatalf("No correct status set in hub observabilityaddon: (%v)", hubObsAddon) - } else if hubObsAddon.Status.Conditions[0].Type != "Deployed" { - t.Fatalf("Wrong status type: (%v)", hubObsAddon.Status) + return &status.StatusReconciler{ + Client: c, + HubClient: hc, + Namespace: testNamespace, + HubNamespace: testHubNamespace, + ObsAddonName: obAddonName, + Logger: logr.Discard(), } } diff --git a/operators/endpointmetrics/main.go b/operators/endpointmetrics/main.go index 6aa9ba5ec..316061de6 100644 --- a/operators/endpointmetrics/main.go +++ b/operators/endpointmetrics/main.go @@ -122,7 +122,7 @@ func main() { os.Exit(1) } - hubClient, err := util.GetOrCreateHubClient(false, scheme) + hubClientWithReload, err := util.NewReloadableHubClient(os.Getenv("HUB_KUBECONFIG"), mgr.GetScheme()) if err != nil { setupLog.Error(err, "Failed to create the hub client") os.Exit(1) @@ -131,15 +131,23 @@ func main() { if err = (&obsepctl.ObservabilityAddonReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - HubClient: hubClient, + HubClient: hubClientWithReload, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ObservabilityAddon") os.Exit(1) } + + namespace := os.Getenv("NAMESPACE") + if namespace == "" { + namespace = os.Getenv("WATCH_NAMESPACE") + } if err = (&statusctl.StatusReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - HubClient: hubClient, + Client: mgr.GetClient(), + HubClient: hubClientWithReload, + Namespace: namespace, + HubNamespace: os.Getenv("HUB_NAMESPACE"), + ObsAddonName: "observability-addon", + Logger: ctrl.Log.WithName("controllers").WithName("Status"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Status") os.Exit(1) diff --git a/operators/endpointmetrics/pkg/util/client.go b/operators/endpointmetrics/pkg/util/client.go index 03904979c..907cfbdc1 100644 --- a/operators/endpointmetrics/pkg/util/client.go +++ b/operators/endpointmetrics/pkg/util/client.go @@ -5,51 +5,65 @@ package util import ( 
- "context" - "os" + "fmt" oav1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - ocpClientSet "github.com/openshift/client-go/config/clientset/versioned" - oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" ) -const ( - obAddonName = "observability-addon" -) +// ReloadableHubClient is a wrapper around the hub client that allows reloading the client. +// This is useful when the kubeconfig file is updated. +type ReloadableHubClient struct { + client.Client + reload func() (client.Client, error) +} -var ( - hubClient client.Client - ocpClient ocpClientSet.Interface -) +// NewReloadableHubClient creates a new hub client with a reload function. +func NewReloadableHubClient(filePath string, clientScheme *runtime.Scheme) (*ReloadableHubClient, error) { + reload := func() (client.Client, error) { + return newHubClient(filePath, clientScheme) + } -var ( - log = ctrl.Log.WithName("util") - hubKubeConfigPath = os.Getenv("HUB_KUBECONFIG") -) + hubClient, err := reload() + if err != nil { + return nil, fmt.Errorf("failed to create the hub client: %w", err) + } + return &ReloadableHubClient{Client: hubClient, reload: reload}, nil +} -// GetOrCreateOCPClient get an existing hub client or create new one if it doesn't exist. -func GetOrCreateHubClient(renew bool, clientScheme *runtime.Scheme) (client.Client, error) { - if os.Getenv("UNIT_TEST") == "true" { - return hubClient, nil +// NewReloadableHubClientWithReloadFunc creates a new hub client with a reload function. +// The reload function is called when the Reload method is called. +// This can be handy for testing purposes. +func NewReloadableHubClientWithReloadFunc(reload func() (client.Client, error)) (*ReloadableHubClient, error) { + hubClient, err := reload() + if err != nil { + return nil, fmt.Errorf("failed to create the hub client: %w", err) } + return &ReloadableHubClient{Client: hubClient, reload: reload}, nil +} - if !renew && hubClient != nil { - return hubClient, nil +// Reload reloads the hub client and returns a new instance of HubClientWithReload. +// HubClientWithReload is immutable. 
+func (c *ReloadableHubClient) Reload() (*ReloadableHubClient, error) { + hubClient, err := c.reload() + if err != nil { + return nil, fmt.Errorf("failed to reload the hub client: %w", err) } + + return &ReloadableHubClient{Client: hubClient, reload: c.reload}, nil +} + +func newHubClient(filePath string, clientScheme *runtime.Scheme) (client.Client, error) { // create the config from the path - config, err := clientcmd.BuildConfigFromFlags("", hubKubeConfigPath) + config, err := clientcmd.BuildConfigFromFlags("", filePath) if err != nil { - log.Error(err, "Failed to create the config") - return nil, err + return nil, fmt.Errorf("failed to create the config: %w", err) } if clientScheme == nil { @@ -64,57 +78,9 @@ func GetOrCreateHubClient(renew bool, clientScheme *runtime.Scheme) (client.Clie // generate the client based off of the config hubClient, err := client.New(config, client.Options{Scheme: clientScheme}) - - if err != nil { - log.Error(err, "Failed to create hub client") - return nil, err - } - - return hubClient, err -} - -// GetOrCreateOCPClient get an existing ocp client or create new one if it doesn't exist. -func GetOrCreateOCPClient() (ocpClientSet.Interface, error) { - if ocpClient != nil { - return ocpClient, nil - } - // create the config from the path - config, err := clientcmd.BuildConfigFromFlags("", "") - if err != nil { - log.Error(err, "Failed to create the config") - return nil, err - } - - // generate the client based off of the config - ocpClient, err = ocpClientSet.NewForConfig(config) - if err != nil { - log.Error(err, "Failed to create ocp config client") - return nil, err - } - - return ocpClient, err -} - -func SetHubClient(c client.Client) { - hubClient = c -} - -func RenewAndRetry(ctx context.Context, scheme *runtime.Scheme) (client.Client, *oav1beta1.ObservabilityAddon, error) { - // try to renew the hub client - log.Info("renew hub client") - hubClient, err := GetOrCreateHubClient(true, scheme) - if err != nil { - log.Error(err, "Failed to create the hub client") - return nil, nil, err - } - - hubObsAddon := &oav1beta1.ObservabilityAddon{} - hubNamespace := os.Getenv("HUB_NAMESPACE") - err = hubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: hubNamespace}, hubObsAddon) if err != nil { - log.Error(err, "Failed to get observabilityaddon in hub cluster", "namespace", hubNamespace) - return nil, nil, err + return nil, fmt.Errorf("failed to create hub client: %w", err) } - return hubClient, hubObsAddon, nil + return hubClient, nil } diff --git a/operators/endpointmetrics/pkg/util/client_test.go b/operators/endpointmetrics/pkg/util/client_test.go deleted file mode 100644 index 24dc430e2..000000000 --- a/operators/endpointmetrics/pkg/util/client_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Red Hat, Inc. 
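
The `Reload` method above returns a fresh wrapper instead of mutating the receiver, so callers replace their reference when a hub call fails in a way that suggests stale credentials, as both controllers in this patch do. A condensed sketch of that calling convention (the helper name is hypothetical; `apierrors` is k8s.io/apimachinery/pkg/api/errors):

```go
func getFromHub(ctx context.Context, hub *util.ReloadableHubClient, key client.ObjectKey, obj client.Object) (*util.ReloadableHubClient, error) {
	err := hub.Get(ctx, key, obj)
	if err == nil || !(apierrors.IsUnauthorized(err) || apierrors.IsForbidden(err)) {
		return hub, err
	}
	// Credentials look stale: rebuild the client from the kubeconfig and retry once.
	hub, err = hub.Reload()
	if err != nil {
		return nil, fmt.Errorf("failed to reload the hub client: %w", err)
	}
	return hub, hub.Get(ctx, key, obj)
}
```
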
-// Copyright Contributors to the Open Cluster Management project -// Licensed under the Apache License 2.0 - -package util - -import ( - "context" - "os" - "testing" - - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - - oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" -) - -func init() { - os.Setenv("UNIT_TEST", "true") - os.Setenv("HUB_NAMESPACE", testNamespace) - s := scheme.Scheme - oav1beta1.AddToScheme(s) -} - -func TestRenewAndRetry(t *testing.T) { - hubClient := fake.NewClientBuilder().Build() - SetHubClient(hubClient) - _, _, err := RenewAndRetry(context.TODO(), nil) - if err == nil { - t.Fatal("missing error") - } - - hubClient1 := fake.NewClientBuilder().WithRuntimeObjects(newObservabilityAddon(name, testNamespace)).Build() - SetHubClient(hubClient1) - _, _, err = RenewAndRetry(context.TODO(), nil) - if err != nil { - t.Fatalf("Error caught: %v", err) - } -} diff --git a/operators/endpointmetrics/pkg/util/lease.go b/operators/endpointmetrics/pkg/util/lease.go index e0c2ea0ce..b408a11f2 100644 --- a/operators/endpointmetrics/pkg/util/lease.go +++ b/operators/endpointmetrics/pkg/util/lease.go @@ -11,6 +11,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + ctrl "sigs.k8s.io/controller-runtime" "open-cluster-management.io/addon-framework/pkg/lease" ) @@ -20,8 +21,10 @@ const ( ) var ( - namespace = os.Getenv("WATCH_NAMESPACE") - clusterName = os.Getenv("HUB_NAMESPACE") + namespace = os.Getenv("WATCH_NAMESPACE") + clusterName = os.Getenv("HUB_NAMESPACE") + log = ctrl.Log.WithName("util") + hubKubeConfigPath = os.Getenv("HUB_KUBECONFIG") ) func StartLease() { From e90911c5bf26bdb027e2e7d84df10b282a61caa1 Mon Sep 17 00:00:00 2001 From: Douglas Camata <159076+douglascamata@users.noreply.github.com> Date: Thu, 2 May 2024 13:15:56 +0200 Subject: [PATCH 10/33] [ACM-10706] Add support for custom alertmanager url (#1419) * Add support for custom alertmanager hub URL Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Add some missing contexts args Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Add tests for `GetAlertmanagerEndpoint` Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --------- Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --- .../multiclusterobservability_types.go | 5 ++ ...gement.io_multiclusterobservabilities.yaml | 5 ++ ...gement.io_multiclusterobservabilities.yaml | 7 +++ .../placementrule/hub_info_secret.go | 5 +- .../pkg/certificates/certificates.go | 7 ++- .../pkg/config/config.go | 26 ++++++-- .../pkg/config/config_test.go | 59 +++++++++++++++++-- 7 files changed, 100 insertions(+), 14 deletions(-) diff --git a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go index 20d257339..accea4da1 100644 --- a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go +++ b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go @@ -47,6 +47,11 @@ type AdvancedConfig struct { // For the metrics-collector that runs in the hub this setting has no effect. 
// +optional CustomObservabilityHubURL observabilityshared.URL `json:"customObservabilityHubURL,omitempty"` + // CustomAlertmanagerHubURL overrides the alertmanager URL to send alerts from the spoke + // to the hub server. + // For the alertmanager that runs in the hub this setting has no effect. + // +optional + CustomAlertmanagerHubURL observabilityshared.URL `json:"customAlertmanagerHubURL,omitempty"` // The spec of the data retention configurations // +optional RetentionConfig *RetentionConfig `json:"retentionConfig,omitempty"` diff --git a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml index 004f881f9..473330a3e 100644 --- a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml +++ b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -1148,6 +1148,11 @@ spec: description: Annotations is an unstructured key value map stored with a service account type: object type: object + customAlertmanagerHubURL: + description: CustomAlertmanagerHubURL overrides the alertmanager URL to send alerts from the spoke to the hub server. For the alertmanager that runs in the hub this setting has no effect. + maxLength: 2083 + pattern: ^https?:\/\/ + type: string customObservabilityHubURL: description: CustomObservabilityHubURL overrides the endpoint used by the metrics-collector to send metrics to the hub server. For the metrics-collector that runs in the hub this setting has no effect. maxLength: 2083 diff --git a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml index 409242278..7534e7de6 100644 --- a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml +++ b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -1761,6 +1761,13 @@ spec: stored with a service account type: object type: object + customAlertmanagerHubURL: + description: CustomAlertmanagerHubURL overrides the alertmanager + URL to send alerts from the spoke to the hub server. For the + alertmanager that runs in the hub this setting has no effect. + maxLength: 2083 + pattern: ^https?:\/\/ + type: string customObservabilityHubURL: description: CustomObservabilityHubURL overrides the endpoint used by the metrics-collector to send metrics to the hub server. 
diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go index c36cefb0a..3ece1f486 100644 --- a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go +++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go @@ -5,6 +5,7 @@ package placementrule import ( + "context" "net/url" "gopkg.in/yaml.v2" @@ -27,7 +28,7 @@ func generateHubInfoSecret(client client.Client, obsNamespace string, if ingressCtlCrdExists { var err error - obsApiRouteHost, err = config.GetObsAPIHost(client, obsNamespace) + obsApiRouteHost, err = config.GetObsAPIHost(context.TODO(), client, obsNamespace) if err != nil { log.Error(err, "Failed to get the host for observatorium API route") return nil, err @@ -35,7 +36,7 @@ func generateHubInfoSecret(client client.Client, obsNamespace string, // if alerting is disabled, do not set alertmanagerEndpoint if !config.IsAlertingDisabled() { - alertmanagerEndpoint, err = config.GetAlertmanagerEndpoint(client, obsNamespace) + alertmanagerEndpoint, err = config.GetAlertmanagerEndpoint(context.TODO(), client, obsNamespace) if err != nil { log.Error(err, "Failed to get alertmanager endpoint") return nil, err diff --git a/operators/multiclusterobservability/pkg/certificates/certificates.go b/operators/multiclusterobservability/pkg/certificates/certificates.go index 6e80a48fd..d942ae099 100644 --- a/operators/multiclusterobservability/pkg/certificates/certificates.go +++ b/operators/multiclusterobservability/pkg/certificates/certificates.go @@ -16,9 +16,10 @@ import ( "net" "time" - operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" certificatesv1 "k8s.io/api/certificates/v1" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" + "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -460,7 +461,7 @@ func pemEncode(cert []byte, key []byte) (*bytes.Buffer, *bytes.Buffer) { func getHosts(c client.Client, ingressCtlCrdExists bool) ([]string, error) { hosts := []string{config.GetObsAPISvc(config.GetOperandName(config.Observatorium))} if ingressCtlCrdExists { - url, err := config.GetObsAPIHost(c, config.GetDefaultNamespace()) + url, err := config.GetObsAPIHost(context.TODO(), c, config.GetDefaultNamespace()) if err != nil { log.Error(err, "Failed to get api route address") return nil, err @@ -515,7 +516,7 @@ func CreateUpdateMtlsCertSecretForHubCollector(c client.Client, updateMtlsCert b log.Error(nil, "failed to sign CSR") return errors.NewBadRequest("failed to sign CSR") } - //Create a secret + // Create a secret HubMtlsSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: operatorconfig.HubMetricsCollectorMtlsCert, diff --git a/operators/multiclusterobservability/pkg/config/config.go b/operators/multiclusterobservability/pkg/config/config.go index 3c0c5922c..9ad6ec534 100644 --- a/operators/multiclusterobservability/pkg/config/config.go +++ b/operators/multiclusterobservability/pkg/config/config.go @@ -481,9 +481,9 @@ func GetDefaultTenantName() string { } // GetObsAPIHost is used to get the URL for observartium api gateway. 
-func GetObsAPIHost(client client.Client, namespace string) (string, error) { +func GetObsAPIHost(ctx context.Context, client client.Client, namespace string) (string, error) { mco := &observabilityv1beta2.MultiClusterObservability{} - err := client.Get(context.TODO(), + err := client.Get(ctx, types.NamespacedName{ Name: GetMonitoringCRName(), }, mco) @@ -532,10 +532,26 @@ func GetMCONamespace() string { } // GetAlertmanagerEndpoint is used to get the URL for alertmanager. -func GetAlertmanagerEndpoint(client client.Client, namespace string) (string, error) { - found := &routev1.Route{} +func GetAlertmanagerEndpoint(ctx context.Context, client client.Client, namespace string) (string, error) { + mco := &observabilityv1beta2.MultiClusterObservability{} + err := client.Get(ctx, + types.NamespacedName{ + Name: GetMonitoringCRName(), + }, mco) + if err != nil && !errors.IsNotFound(err) { + return "", err + } + advancedConfig := mco.Spec.AdvancedConfig + if advancedConfig != nil && advancedConfig.CustomAlertmanagerHubURL != "" { + err := advancedConfig.CustomAlertmanagerHubURL.Validate() + if err != nil { + return "", err + } + return string(advancedConfig.CustomAlertmanagerHubURL), nil + } - err := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagerRouteName, Namespace: namespace}, found) + found := &routev1.Route{} + err = client.Get(ctx, types.NamespacedName{Name: AlertmanagerRouteName, Namespace: namespace}, found) if err != nil && errors.IsNotFound(err) { // if the alertmanager router is not created yet, fallback to get host from the domain of ingresscontroller domain, err := getDomainForIngressController( diff --git a/operators/multiclusterobservability/pkg/config/config_test.go b/operators/multiclusterobservability/pkg/config/config_test.go index 3ad4552a5..407919465 100644 --- a/operators/multiclusterobservability/pkg/config/config_test.go +++ b/operators/multiclusterobservability/pkg/config/config_test.go @@ -5,6 +5,7 @@ package config import ( + "context" "fmt" "os" "reflect" @@ -266,12 +267,12 @@ func TestGetObsAPIHost(t *testing.T) { scheme.AddKnownTypes(mcov1beta2.GroupVersion, &mcov1beta2.MultiClusterObservability{}) client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route).Build() - host, _ := GetObsAPIHost(client, "default") + host, _ := GetObsAPIHost(context.TODO(), client, "default") if host == apiServerURL { t.Errorf("Should not get route host in default namespace") } - host, _ = GetObsAPIHost(client, "test") + host, _ = GetObsAPIHost(context.TODO(), client, "test") if host != apiServerURL { t.Errorf("Observatorium api (%v) is not the expected (%v)", host, apiServerURL) } @@ -288,18 +289,68 @@ func TestGetObsAPIHost(t *testing.T) { }, } client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route, mco).Build() - host, _ = GetObsAPIHost(client, "test") + host, _ = GetObsAPIHost(context.TODO(), client, "test") if host != customBaseURL { t.Errorf("Observatorium api (%v) is not the expected (%v)", host, customBaseURL) } mco.Spec.AdvancedConfig.CustomObservabilityHubURL = "httpa://foob ar.c" client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route, mco).Build() - _, err := GetObsAPIHost(client, "test") + _, err := GetObsAPIHost(context.TODO(), client, "test") if err == nil { t.Errorf("expected error when parsing URL '%v', but got none", mco.Spec.AdvancedConfig.CustomObservabilityHubURL) } +} + +func TestGetAlertmanagerEndpoint(t *testing.T) { + routeURL := "http://route.example.com" + route := &routev1.Route{ 
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      AlertmanagerRouteName,
+			Namespace: "test",
+		},
+		Spec: routev1.RouteSpec{
+			Host: routeURL,
+		},
+	}
+	scheme := runtime.NewScheme()
+	scheme.AddKnownTypes(routev1.GroupVersion, route)
+	scheme.AddKnownTypes(mcov1beta2.GroupVersion, &mcov1beta2.MultiClusterObservability{})
+	client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route).Build()
+
+	host, _ := GetAlertmanagerEndpoint(context.TODO(), client, "default")
+	if host == routeURL {
+		t.Errorf("Should not get route host in default namespace")
+	}
+
+	host, _ = GetAlertmanagerEndpoint(context.TODO(), client, "test")
+	if host != routeURL {
+		t.Errorf("Alertmanager URL (%v) is not the expected (%v)", host, routeURL)
+	}
+	customBaseURL := "https://custom.base/url"
+	mco := &mcov1beta2.MultiClusterObservability{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: GetMonitoringCRName(),
+		},
+		Spec: mcov1beta2.MultiClusterObservabilitySpec{
+			AdvancedConfig: &mcov1beta2.AdvancedConfig{
+				CustomAlertmanagerHubURL: mcoshared.URL(customBaseURL),
+			},
+		},
+	}
+	client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route, mco).Build()
+	host, _ = GetAlertmanagerEndpoint(context.TODO(), client, "test")
+	if host != customBaseURL {
+		t.Errorf("Alertmanager URL (%v) is not the expected (%v)", host, customBaseURL)
+	}
+
+	mco.Spec.AdvancedConfig.CustomAlertmanagerHubURL = "httpa://foob ar.c"
+	client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route, mco).Build()
+	_, err := GetAlertmanagerEndpoint(context.TODO(), client, "test")
+	if err == nil {
+		t.Errorf("expected error when parsing URL '%v', but got none", mco.Spec.AdvancedConfig.CustomAlertmanagerHubURL)
+	}
 }

 func TestIsPaused(t *testing.T) {

From 6edaad815af5e213c888aebc7811b043742e00c6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jacob=20Baung=C3=A5rd=20Hansen?=
Date: Tue, 7 May 2024 15:52:33 +0200
Subject: [PATCH 11/33] Add required permissions for simulated cluster &
 various fixes for metrics generation (#1425)

* metrics-collector simulator: add missing perms

A couple of additional permissions seem to be needed for the metrics
collector simulator to work correctly. This commit adds the needed
clusterrole and rolebinding.

Signed-off-by: Jacob Baungard Hansen

* metrics-collector simulator: metrics generator fix

This commit contains various changes to make the generation of metrics
for the metrics-collector simulator work.

- For getting recording rules, use jq to urlencode data when querying
  in-cluster prometheus. Previously we removed spaces from the rules,
  but that caused problems with rules that might contain two keywords
  after each other such as `.. or sum(..`
- Don't use `ROOTDIR` as it could be undefined, and just use `WORKDIR`
- Fix installation of various tools that used the misspelled `WORK_DIR`
  variable and therefore didn't work
- Install gojsontoyaml from release tarballs instead of via Go (which
  had some problems on my system)
- Update jq to 1.7.1
- Potentially make tools downloads work for arm macs (untested)

Signed-off-by: Jacob Baungard Hansen

* metrics-collector simulator: use install-binaries

Use the common install binaries script to install binaries instead of
the custom solution in these files.

Signed-off-by: Jacob Baungard Hansen

* install-binaries: bump jq & fix jq on arm macs

Bump jq to latest release 1.7.1. This should fix jq install on arm64
macs, as the previous 1.6 release was not compiled for this target and
the link would result in a 404.
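In effect, the jq download now resolves per platform roughly as follows
(a sketch of the script's behaviour; arm64 Linux remains uncovered):

```
case "$(uname)" in
Linux) curl -o jq -L "https://github.com/stedolan/jq/releases/download/jq-1.7.1/jq-linux64" ;;
Darwin) curl -o jq -L "https://github.com/stedolan/jq/releases/download/jq-1.7.1/jq-macos-$(uname -m)" ;;
esac
```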
Signed-off-by: Jacob Baungard Hansen

* install-binaries: don't call directly if sourced

If install-binaries was sourced from another script, and that script
was called with cmd line args, an error would be returned such as the
below:

```
install-binaries.sh: line 123: -n: command not found
```

This is due to the `$*` at the end of the install-binaries script. With
this commit we check if the file has been sourced or not, and only make
the call to `$*` if the install-binaries script is called directly.

Signed-off-by: Jacob Baungard Hansen

---------

Signed-off-by: Jacob Baungard Hansen
---
 scripts/install-binaries.sh                   | 21 ++++++--
 .../clean-metrics-collector.sh                |  3 ++
 .../generate-metrics-data.sh                  | 50 ++++++-------------
 ...operator-role-crd-hostedclusters-read.yaml | 14 ++++++
 ...lity-operator-crd-hostedclusters-read.yaml | 14 ++++++
 .../setup-metrics-collector.sh                | 41 +++++++--------
 6 files changed, 83 insertions(+), 60 deletions(-)
 create mode 100644 tools/simulator/metrics-collector/rb-endpoint-operator-role-crd-hostedclusters-read.yaml
 create mode 100644 tools/simulator/metrics-collector/role-endpoint-observability-operator-crd-hostedclusters-read.yaml

diff --git a/scripts/install-binaries.sh b/scripts/install-binaries.sh
index 9636baf09..c12472f42 100755
--- a/scripts/install-binaries.sh
+++ b/scripts/install-binaries.sh
@@ -11,7 +11,7 @@ OPERATOR_SDK_VERSION="${KUBECTL_VERSION:=v1.4.2}"
 KUBECTL_VERSION="${KUBECTL_VERSION:=v1.28.2}"
 KUSTOMIZE_VERSION="${KUSTOMIZE_VERSION:=v5.3.0}"

-JQ_VERSION="${JQ_VERSION:=1.6}"
+JQ_VERSION="${JQ_VERSION:=1.7.1}"
 KIND_VERSION="${KIND_VERSION:=v0.22.0}"

 BIN_DIR="${BIN_DIR:=/usr/local/bin}"
@@ -64,7 +64,7 @@ install_jq() {
     if [[ "$(uname)" == "Linux" ]]; then
       curl -o jq -L "https://github.com/stedolan/jq/releases/download/jq-${JQ_VERSION}/jq-linux64"
     elif [[ "$(uname)" == "Darwin" ]]; then
-      curl -o jq -L "https://github.com/stedolan/jq/releases/download/jq-${JQ_VERSION}/jq-osx-$(uname -m)"
+      curl -o jq -L "https://github.com/stedolan/jq/releases/download/jq-${JQ_VERSION}/jq-macos-$(uname -m)"
     fi
     chmod +x ./jq && mv ./jq ${bin_dir}/jq
   fi
@@ -83,6 +83,17 @@ install_kind() {
   fi
 }

+install_gojsontoyaml() {
+  bin_dir=${1:-${BIN_DIR}}
+  if ! command -v gojsontoyaml &>/dev/null; then
+    if [[ "$(uname)" == "Linux" ]]; then
+      curl -L https://github.com/brancz/gojsontoyaml/releases/download/v0.1.0/gojsontoyaml_0.1.0_linux_amd64.tar.gz | tar -xz -C ${bin_dir} gojsontoyaml
+    elif [[ "$(uname)" == "Darwin" ]]; then
+      curl -L https://github.com/brancz/gojsontoyaml/releases/download/v0.1.0/gojsontoyaml_0.1.0_darwin_$(uname -m).tar.gz | tar -xz -C ${bin_dir} gojsontoyaml
+    fi
+  fi
+}
+
 install_build_deps() {
   bin_dir=${1:-${BIN_DIR}}
   install_operator_sdk ${bin_dir}
@@ -104,5 +115,9 @@ install_e2e_tests_deps() {
   install_kustomize ${bin_dir}
 }

+# check if script is called directly, or sourced
+(return 0 2>/dev/null) && sourced=1 || sourced=0
 # This allows functions within this file to be called individually from Makefile(s).
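+# Example (hypothetical invocation): `./install-binaries.sh install_jq /tmp/bin`
+# dispatches straight to install_jq, whereas `source install-binaries.sh` only
+# defines the functions for the calling script.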
-$* +if [[ $sourced == 0 ]]; then + $* +fi diff --git a/tools/simulator/metrics-collector/clean-metrics-collector.sh b/tools/simulator/metrics-collector/clean-metrics-collector.sh index 276ad9069..b5a37472a 100755 --- a/tools/simulator/metrics-collector/clean-metrics-collector.sh +++ b/tools/simulator/metrics-collector/clean-metrics-collector.sh @@ -62,6 +62,9 @@ for i in $(seq 1 ${NUMBERS}); do cluster_name=${MANAGED_CLUSTER_PREFIX}-${i} ${KUBECTL} delete deploy -n ${cluster_name} metrics-collector-deployment ${KUBECTL} delete clusterrolebinding ${cluster_name}-clusters-metrics-collector-view + ${KUBECTL} delete clusterrolebinding ${cluster_name}-endpoint-operator-role-crd-hostedclusters-read ${KUBECTL} delete -n ${cluster_name} secret/observability-managed-cluster-certs ${KUBECTL} delete ns ${cluster_name} done + +${KUBECTL} delete clusterrole endpoint-observability-operator-crd-hostedclusters-read diff --git a/tools/simulator/metrics-collector/generate-metrics-data.sh b/tools/simulator/metrics-collector/generate-metrics-data.sh index 953322dc8..a45a36d55 100755 --- a/tools/simulator/metrics-collector/generate-metrics-data.sh +++ b/tools/simulator/metrics-collector/generate-metrics-data.sh @@ -10,9 +10,12 @@ WORKDIR="$( cd "$(dirname "$0")" pwd -P )" + +source ${WORKDIR}/../../../scripts/install-binaries.sh + # Create bin directory and add it to PATH -mkdir -p ${ROOTDIR}/bin -export PATH={ROOTDIR}/bin:${PATH} +mkdir -p ${WORKDIR}/bin +export PATH=${PATH}:${WORKDIR}/bin # tmp output directory for metrics list TMP_OUT=$(mktemp -d /tmp/metrics.XXXXXXXXXX) @@ -33,43 +36,23 @@ fi # install kubectl KUBECTL="kubectl" -if ! command -v kubectl &>/dev/null; then - if command -v oc &>/dev/null; then - KUBECTL="oc" - else - if [[ "$(uname)" == "Linux" ]]; then - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl - elif [[ "$(uname)" == "Darwin" ]]; then - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/darwin/amd64/kubectl - fi - chmod +x ./kubectl && mv ./kubectl ${WORK_DIR}/bin/kubectl - fi -fi +install_kubectl ${WORKDIR}/bin # install jq -if ! command -v jq &>/dev/null; then - if [[ "$(uname)" == "Linux" ]]; then - curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 - elif [[ "$(uname)" == "Darwin" ]]; then - curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 - fi - chmod +x ./jq - chmod +x ./jq && mv ./jq ${WORK_DIR}/bin/jq -fi +install_jq ${WORKDIR}/bin # install gojsontoyaml -GOBIN=${WORK_DIR}/bin go install github.com/brancz/gojsontoyaml -GOJSONTOYAML_BIN=${WORK_DIR}/bin/gojsontoyaml +install_gojsontoyaml ${WORKDIR}/bin function get_metrics_list() { echo "getting metrics list..." 
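+  # The allowlist is a ConfigMap manifest: convert it YAML->JSON, pull the
+  # embedded metrics_list.yaml document out of .data, convert that again, and
+  # render the matches/names entries as {...} selectors for Prometheus.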
if [[ -z ${IS_GENERATING_OCP311_METRICS} ]]; then - matches=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.matches' | jq '"{" + .[] + "}"') - names=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.names' | jq '"{__name__=\"" + .[] + "\"}"') + matches=$(curl -L ${METRICS_ALLOW_LIST_URL} | gojsontoyaml --yamltojson | jq -r '.data."metrics_list.yaml"' | gojsontoyaml --yamltojson | jq -r '.matches' | jq '"{" + .[] + "}"') + names=$(curl -L ${METRICS_ALLOW_LIST_URL} | gojsontoyaml --yamltojson | jq -r '.data."metrics_list.yaml"' | gojsontoyaml --yamltojson | jq -r '.names' | jq '"{__name__=\"" + .[] + "\"}"') echo $matches $names | jq -s . >${METRICS_JSON_OUT} else - matches=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.matches' | jq '"{" + .[] + "}"') - names=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.names' | jq '"{__name__=\"" + .[] + "\"}"') + matches=$(curl -L ${METRICS_ALLOW_LIST_URL} | gojsontoyaml --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | gojsontoyaml --yamltojson | jq -r '.matches' | jq '"{" + .[] + "}"') + names=$(curl -L ${METRICS_ALLOW_LIST_URL} | gojsontoyaml --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | gojsontoyaml --yamltojson | jq -r '.names' | jq '"{__name__=\"" + .[] + "\"}"') echo $matches $names | jq -s . >${METRICS_JSON_OUT} fi } @@ -77,10 +60,10 @@ function get_metrics_list() { function get_recordingrules_list() { echo "getting recordingrules list..." if [[ -z ${IS_GENERATING_OCP311_METRICS} ]]; then - recordingrules=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq '.recording_rules[]') + recordingrules=$(curl -L ${METRICS_ALLOW_LIST_URL} | gojsontoyaml --yamltojson | jq -r '.data."metrics_list.yaml"' | gojsontoyaml --yamltojson | jq '.recording_rules[]') echo "$recordingrules" | jq -s . >${RECORDINGRULES_JSON_OUT} else - recordingrules=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq '.recording_rules[]') + recordingrules=$(curl -L ${METRICS_ALLOW_LIST_URL} | gojsontoyaml --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | gojsontoyaml --yamltojson | jq '.recording_rules[]') echo "$recordingrules" | jq -s . 
>${RECORDINGRULES_JSON_OUT} fi } @@ -101,9 +84,8 @@ function generate_recordingrules() { cat ${RECORDINGRULES_JSON_OUT} | jq -cr '.[]' | while read item; do record=$(jq -r '.record' <<<"$item") expr=$(jq -r '.expr' <<<"$item") - #expr=${expr//\"/\\\"} - expr=$(echo "${expr}" | tr -d " ") - querycmd="${query} $(printf -- "--data-urlencode query=%s" ${expr})" + urlencode=$(printf %s "${expr}" | jq -s -R -r @uri) + querycmd="${query} -d query=${urlencode}" echo -e "\n# TYPE ${record} untyped" >>${TIME_SERIES_OUT} ${querycmd} | jq -r '.data.result' | jq -cr '.[]' | while read result; do vec="${record}" diff --git a/tools/simulator/metrics-collector/rb-endpoint-operator-role-crd-hostedclusters-read.yaml b/tools/simulator/metrics-collector/rb-endpoint-operator-role-crd-hostedclusters-read.yaml new file mode 100644 index 000000000..9c8839b30 --- /dev/null +++ b/tools/simulator/metrics-collector/rb-endpoint-operator-role-crd-hostedclusters-read.yaml @@ -0,0 +1,14 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __CLUSTER_NAME__-endpoint-operator-role-crd-hostedclusters-read + annotations: + owner: multicluster-operator +subjects: + - kind: ServiceAccount + name: endpoint-observability-operator-sa + namespace: __CLUSTER_NAME__ +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: endpoint-observability-operator-crd-hostedclusters-read diff --git a/tools/simulator/metrics-collector/role-endpoint-observability-operator-crd-hostedclusters-read.yaml b/tools/simulator/metrics-collector/role-endpoint-observability-operator-crd-hostedclusters-read.yaml new file mode 100644 index 000000000..765f20a6a --- /dev/null +++ b/tools/simulator/metrics-collector/role-endpoint-observability-operator-crd-hostedclusters-read.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: endpoint-observability-operator-crd-hostedclusters-read +rules: + - verbs: + - get + - list + apiGroups: + - apiextensions.k8s.io + - hypershift.openshift.io + resources: + - customresourcedefinitions + - hostedclusters diff --git a/tools/simulator/metrics-collector/setup-metrics-collector.sh b/tools/simulator/metrics-collector/setup-metrics-collector.sh index 6db913727..3b14c8a71 100755 --- a/tools/simulator/metrics-collector/setup-metrics-collector.sh +++ b/tools/simulator/metrics-collector/setup-metrics-collector.sh @@ -6,34 +6,19 @@ WORK_DIR="$( cd "$(dirname "$0")" pwd -P )" + +source ${WORK_DIR}/../../../scripts/install-binaries.sh + # Create bin directory and add it to PATH mkdir -p ${WORK_DIR}/bin export PATH=${PATH}:${WORK_DIR}/bin -if ! command -v jq &>/dev/null; then - if [[ "$(uname)" == "Linux" ]]; then - curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 - elif [[ "$(uname)" == "Darwin" ]]; then - curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 - fi - chmod +x ./jq - chmod +x ./jq && mv ./jq ${WORK_DIR}/bin/jq -fi +# install jq +install_jq ${WORK_DIR}/bin +# install kubectl KUBECTL="kubectl" -if ! 
command -v kubectl &>/dev/null; then
-  if command -v oc &>/dev/null; then
-    KUBECTL="oc"
-  else
-    echo "This script will install kubectl (https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your machine"
-    if [[ "$(uname)" == "Linux" ]]; then
-      curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
-    elif [[ "$(uname)" == "Darwin" ]]; then
-      curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/darwin/amd64/kubectl
-    fi
-    chmod +x ./kubectl && mv ./kubectl ${WORK_DIR}/bin/kubectl
-  fi
-fi
+install_kubectl ${WORK_DIR}/bin

 SED_COMMAND='sed -i'
 if [[ "$(uname)" == "Darwin" ]]; then
@@ -107,7 +92,7 @@ if ! [[ ${WORKERS} =~ ${re} ]]; then
   exit 1
 fi

-OBSERVABILITY_NS="open-cluster-management-addon-observability"
+OBSERVABILITY_NS="open-cluster-management-observability"

 # metrics data source image
 DEFAULT_METRICS_IMAGE="quay.io/ocm-observability/metrics-data:2.4.0"
@@ -154,10 +139,20 @@ for i in $(seq 1 ${NUMBERS}); do
   ${KUBECTL} -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]'
   ${KUBECTL} -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]'

+  # deploy role
+  cat "role-endpoint-observability-operator-crd-hostedclusters-read.yaml" | ${KUBECTL} -n ${cluster_name} apply -f -
+
   # deploy ClusterRoleBinding for read metrics from OCP prometheus
   rolebinding_yaml_file=${cluster_name}-metrics-collector-view.yaml
   cp -rf metrics-collector-view.yaml "$rolebinding_yaml_file"
   ${SED_COMMAND} "s~__CLUSTER_NAME__~${cluster_name}~g" "${rolebinding_yaml_file}"
   cat "${rolebinding_yaml_file}" | ${KUBECTL} -n ${cluster_name} apply -f -
   rm -f "${rolebinding_yaml_file}"
+
+  # deploy ClusterRoleBinding for reading CRDs and HostedClusters
+  rolebinding_yaml_file=${cluster_name}-rb-endpoint-operator-role-crd-hostedclusters-read.yaml
+  cp -rf rb-endpoint-operator-role-crd-hostedclusters-read.yaml "$rolebinding_yaml_file"
+  ${SED_COMMAND} "s~__CLUSTER_NAME__~${cluster_name}~g" "${rolebinding_yaml_file}"
+  cat "${rolebinding_yaml_file}" | ${KUBECTL} -n ${cluster_name} apply -f -
+  rm -f "${rolebinding_yaml_file}"
 done

From 462d4448f5886d0679da8f3c6e6ef3fdbfc1083b Mon Sep 17 00:00:00 2001
From: Thibault Mange <22740367+thibaultmg@users.noreply.github.com>
Date: Tue, 14 May 2024 16:35:16 +0200
Subject: [PATCH 12/33] [ACM-10812]: retry status update on conflict (#1427)

* retry status update on conflict

Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com>

* add maxConditions handling

Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com>

* return err

Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com>

* sort status condition

Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com>

---------

Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com>
---
 .../observabilityaddon_controller.go          |  25 ++-
 operators/endpointmetrics/pkg/util/status.go  | 116 +++++++++----
 .../endpointmetrics/pkg/util/status_test.go   | 155 +++++++++++++++---
 3 files changed, 232 insertions(+), 64 deletions(-)

diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go
index dfd6b6718..7f3d1a4b5 100644
--- 
a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go @@ -201,7 +201,12 @@ func (r *ObservabilityAddonReconciler) Reconcile(ctx context.Context, req ctrl.R log.Error(err, "OCP prometheus service does not exist") // ACM 8509: Special case for hub/local cluster metrics collection // We do not report status for hub endpoint operator - util.ReportStatus(ctx, r.Client, obsAddon, "NotSupported", !isHubMetricsCollector) + if !isHubMetricsCollector { + if err := util.ReportStatus(ctx, r.Client, util.NotSupportedStatus, obsAddon.Name, obsAddon.Namespace); err != nil { + log.Error(err, "Failed to report status") + } + } + return ctrl.Result{}, nil } return ctrl.Result{}, fmt.Errorf("failed to check prometheus resource: %w", err) @@ -297,19 +302,27 @@ func (r *ObservabilityAddonReconciler) Reconcile(ctx context.Context, req ctrl.R 1, forceRestart) if err != nil { - util.ReportStatus(ctx, r.Client, obsAddon, "Degraded", !isHubMetricsCollector) + if !isHubMetricsCollector { + if err := util.ReportStatus(ctx, r.Client, util.DegradedStatus, obsAddon.Name, obsAddon.Namespace); err != nil { + log.Error(err, "Failed to report status") + } + } return ctrl.Result{}, fmt.Errorf("failed to update metrics collectors: %w", err) } - if created { - util.ReportStatus(ctx, r.Client, obsAddon, "Deployed", !isHubMetricsCollector) + if created && !isHubMetricsCollector { + if err := util.ReportStatus(ctx, r.Client, util.DeployedStatus, obsAddon.Name, obsAddon.Namespace); err != nil { + log.Error(err, "Failed to report status") + } } } else { deleted, err := updateMetricsCollectors(ctx, r.Client, obsAddon.Spec, *hubInfo, clusterID, clusterType, 0, false) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to update metrics collectors: %w", err) } - if deleted { - util.ReportStatus(ctx, r.Client, obsAddon, "Disabled", !isHubMetricsCollector) + if deleted && !isHubMetricsCollector { + if err := util.ReportStatus(ctx, r.Client, util.DisabledStatus, obsAddon.Name, obsAddon.Namespace); err != nil { + log.Error(err, "Failed to report status") + } } } diff --git a/operators/endpointmetrics/pkg/util/status.go b/operators/endpointmetrics/pkg/util/status.go index f1230aa40..d760a7933 100644 --- a/operators/endpointmetrics/pkg/util/status.go +++ b/operators/endpointmetrics/pkg/util/status.go @@ -6,49 +6,101 @@ package util import ( "context" + "sort" "time" oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" ) +type StatusConditionName string + +const ( + DeployedStatus StatusConditionName = "Deployed" + DisabledStatus StatusConditionName = "Disabled" + DegradedStatus StatusConditionName = "Degraded" + NotSupportedStatus StatusConditionName = "NotSupported" + MaxStatusConditionsCount = 10 +) + var ( - conditions = map[string]map[string]string{ - "Deployed": { - "type": "Progressing", - "reason": "Deployed", - "message": "Metrics collector deployed"}, - "Disabled": { - "type": "Disabled", - "reason": "Disabled", - "message": "enableMetrics is set to False"}, - "Degraded": { - "type": "Degraded", - "reason": "Degraded", - "message": "Metrics collector deployment not successful"}, - "NotSupported": { - "type": "NotSupported", - "reason": "NotSupported", - "message": "No 
Prometheus service found in this cluster"}, + conditions = map[StatusConditionName]*oav1beta1.StatusCondition{ + DeployedStatus: { + Type: "Progressing", + Reason: "Deployed", + Message: "Metrics collector deployed", + Status: metav1.ConditionTrue, + }, + DisabledStatus: { + Type: "Disabled", + Reason: "Disabled", + Message: "enableMetrics is set to False", + Status: metav1.ConditionTrue, + }, + DegradedStatus: { + Type: "Degraded", + Reason: "Degraded", + Message: "Metrics collector deployment not successful", + Status: metav1.ConditionTrue, + }, + NotSupportedStatus: { + Type: "NotSupported", + Reason: "NotSupported", + Message: "No Prometheus service found in this cluster", + Status: metav1.ConditionTrue, + }, } ) -func ReportStatus(ctx context.Context, client client.Client, i *oav1beta1.ObservabilityAddon, t string, reportStatus bool) { - if !reportStatus { - return - } - i.Status.Conditions = []oav1beta1.StatusCondition{ - { - Type: conditions[t]["type"], - Status: metav1.ConditionTrue, - LastTransitionTime: metav1.NewTime(time.Now()), - Reason: conditions[t]["reason"], - Message: conditions[t]["message"], - }, +func ReportStatus(ctx context.Context, client client.Client, condition StatusConditionName, addonName, addonNs string) error { + newCondition := conditions[condition].DeepCopy() + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + + // Fetch the ObservabilityAddon instance in local cluster, and update the status + // Retry on conflict + obsAddon := &oav1beta1.ObservabilityAddon{} + retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := client.Get(ctx, types.NamespacedName{Name: addonName, Namespace: addonNs}, obsAddon); err != nil { + return err + } + + if !shouldAppendCondition(obsAddon.Status.Conditions, newCondition) { + return nil + } + + obsAddon.Status.Conditions = append(obsAddon.Status.Conditions, *newCondition) + + if len(obsAddon.Status.Conditions) > MaxStatusConditionsCount { + obsAddon.Status.Conditions = obsAddon.Status.Conditions[len(obsAddon.Status.Conditions)-MaxStatusConditionsCount:] + } + + return client.Status().Update(ctx, obsAddon) + }) + if retryErr != nil { + return retryErr } - err := client.Status().Update(ctx, i) - if err != nil { - log.Error(err, "Failed to update status for observabilityaddon") + + return nil +} + +// shouldAppendCondition checks if the new condition should be appended to the status conditions +// based on the last condition in the slice. 
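+// Conditions are not guaranteed to be stored in chronological order, so the
+// slice is sorted by LastTransitionTime first and the candidate is compared
+// against the most recent entry; it is appended only when Type, Status,
+// Reason or Message actually changed.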
+func shouldAppendCondition(conditions []oav1beta1.StatusCondition, newCondition *oav1beta1.StatusCondition) bool { + if len(conditions) == 0 { + return true } + + sort.Slice(conditions, func(i, j int) bool { + return conditions[i].LastTransitionTime.Before(&conditions[j].LastTransitionTime) + }) + + lastCondition := conditions[len(conditions)-1] + + return lastCondition.Type != newCondition.Type || + lastCondition.Status != newCondition.Status || + lastCondition.Reason != newCondition.Reason || + lastCondition.Message != newCondition.Message } diff --git a/operators/endpointmetrics/pkg/util/status_test.go b/operators/endpointmetrics/pkg/util/status_test.go index 9ef66a8ab..7c31cba96 100644 --- a/operators/endpointmetrics/pkg/util/status_test.go +++ b/operators/endpointmetrics/pkg/util/status_test.go @@ -2,17 +2,23 @@ // Copyright Contributors to the Open Cluster Management project // Licensed under the Apache License 2.0 -package util +package util_test import ( "context" "fmt" "testing" + "time" + "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/util" oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -38,35 +44,132 @@ func TestReportStatus(t *testing.T) { t.Fatalf("Unable to add oav1beta1 scheme: (%v)", err) } - expectedStatus := []oav1beta1.StatusCondition{ - { - Type: "NotSupported", - Status: metav1.ConditionTrue, - Reason: "NotSupported", - Message: "No Prometheus service found in this cluster", - }, - { - Type: "Progressing", - Status: metav1.ConditionTrue, - Reason: "Deployed", - Message: "Metrics collector deployed", - }, - { - Type: "Disabled", - Status: metav1.ConditionTrue, - Reason: "Disabled", - Message: "enableMetrics is set to False", - }, - } - - statusList := []string{"NotSupported", "Deployed", "Disabled"} + // New status should be appended + statusList := []util.StatusConditionName{util.NotSupportedStatus, util.DeployedStatus, util.DisabledStatus} s.AddKnownTypes(oav1beta1.GroupVersion, oa) c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() for i := range statusList { - ReportStatus(context.TODO(), c, oa, statusList[i], true) - if oa.Status.Conditions[0].Message != expectedStatus[i].Message || oa.Status.Conditions[0].Reason != expectedStatus[i].Reason || oa.Status.Conditions[0].Status != expectedStatus[i].Status || oa.Status.Conditions[0].Type != expectedStatus[i].Type { - t.Errorf("Error: Status not updated. Expected: %s, Actual: %s", expectedStatus[i], fmt.Sprintf("%+v\n", oa.Status.Conditions[0])) + if err := util.ReportStatus(context.Background(), c, statusList[i], oa.Name, oa.Namespace); err != nil { + t.Fatalf("Error reporting status: %v", err) + } + runtimeAddon := &oav1beta1.ObservabilityAddon{} + if err := c.Get(context.Background(), types.NamespacedName{Name: name, Namespace: testNamespace}, runtimeAddon); err != nil { + t.Fatalf("Error getting observabilityaddon: (%v)", err) } + + if len(runtimeAddon.Status.Conditions) != i+1 { + t.Errorf("Status not updated. 
Expected: %s, Actual: %s", statusList[i], fmt.Sprintf("%+v\n", runtimeAddon.Status.Conditions)) + } + + if runtimeAddon.Status.Conditions[i].Reason != string(statusList[i]) { + t.Errorf("Status not updated. Expected: %s, Actual: %s", statusList[i], runtimeAddon.Status.Conditions[i].Type) + } + + time.Sleep(1500 * time.Millisecond) // Sleep to ensure LastTransitionTime is different for each condition (1s resolution) + } + + // Change ordering of conditions: Get the list, change the order and update + runtimeAddon := &oav1beta1.ObservabilityAddon{} + if err := c.Get(context.Background(), types.NamespacedName{Name: name, Namespace: testNamespace}, runtimeAddon); err != nil { + t.Fatalf("Error getting observabilityaddon: %v", err) + } + conditions := runtimeAddon.Status.Conditions + conditions[0], conditions[len(conditions)-1] = conditions[len(conditions)-1], conditions[0] + runtimeAddon.Status.Conditions = conditions + if err := c.Status().Update(context.Background(), runtimeAddon); err != nil { + t.Fatalf("Error updating observabilityaddon: (%v)", err) + } + + // Same status than current one should not be appended + if err := util.ReportStatus(context.Background(), c, util.DisabledStatus, oa.Name, oa.Namespace); err != nil { + t.Fatalf("Error reporting status: %v", err) + } + runtimeAddon = &oav1beta1.ObservabilityAddon{} + if err := c.Get(context.Background(), types.NamespacedName{Name: name, Namespace: testNamespace}, runtimeAddon); err != nil { + t.Fatalf("Error getting observabilityaddon: %v", err) + } + + if len(runtimeAddon.Status.Conditions) != len(statusList) { + t.Errorf("Status should not be appended. Expected: %d, Actual: %d", len(statusList), len(runtimeAddon.Status.Conditions)) + } + + // Number of conditions should not exceed MaxStatusConditionsCount + statusList = []util.StatusConditionName{util.DeployedStatus, util.DisabledStatus, util.DegradedStatus} + for i := 0; i < util.MaxStatusConditionsCount+3; i++ { + status := statusList[i%len(statusList)] + if err := util.ReportStatus(context.Background(), c, status, oa.Name, oa.Namespace); err != nil { + t.Fatalf("Error reporting status: %v", err) + } + } + + runtimeAddon = &oav1beta1.ObservabilityAddon{} + if err := c.Get(context.Background(), types.NamespacedName{Name: name, Namespace: testNamespace}, runtimeAddon); err != nil { + t.Fatalf("Error getting observabilityaddon: (%v)", err) + } + + if len(runtimeAddon.Status.Conditions) != util.MaxStatusConditionsCount { + t.Errorf("Number of conditions should not exceed MaxStatusConditionsCount. 
Expected: %d, Actual: %d", util.MaxStatusConditionsCount, len(runtimeAddon.Status.Conditions))
+	}
+}
+
+func TestReportStatus_Conflict(t *testing.T) {
+	// Conflict on update should be retried
+	oa := newObservabilityAddon(name, testNamespace)
+	s := scheme.Scheme
+	oav1beta1.AddToScheme(s)
+	fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(oa).Build()
+	conflictErr := errors.NewConflict(schema.GroupResource{Group: oav1beta1.GroupVersion.Group, Resource: "resource"}, name, fmt.Errorf("conflict"))
+
+	c := newClientWithUpdateError(fakeClient, conflictErr)
+	if err := util.ReportStatus(context.Background(), c, util.DeployedStatus, name, testNamespace); err == nil {
+		t.Fatalf("Conflict error should be retried and return an error if it fails")
+	}
+	if c.UpdateCallsCount() <= 1 {
+		t.Errorf("Conflict error should be retried, called %d times", c.UpdateCallsCount())
+	}
+}
+
+// TestClient wraps a client.Client to customize operations for testing
+type TestClient struct {
+	client.Client
+	UpdateError      error
+	updateCallsCount int
+	statusWriter     *TestStatusWriter
+}
+
+func newClientWithUpdateError(c client.Client, updateError error) *TestClient {
+	ret := &TestClient{
+		Client:      c,
+		UpdateError: updateError,
+	}
+	ret.statusWriter = &TestStatusWriter{SubResourceWriter: c.Status(), updateError: &ret.UpdateError, callsCount: &ret.updateCallsCount}
+	return ret
+}
+
+func (c *TestClient) Status() client.StatusWriter {
+	return c.statusWriter
+}
+
+func (c *TestClient) UpdateCallsCount() int {
+	return c.updateCallsCount
+}
+
+func (c *TestClient) Reset() {
+	c.updateCallsCount = 0
+}
+
+type TestStatusWriter struct {
+	client.SubResourceWriter
+	updateError *error
+	callsCount  *int
+}
+
+func (f *TestStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error {
+	*f.callsCount++
+
+	if *f.updateError != nil {
+		return *f.updateError
 	}
+	return f.SubResourceWriter.Update(ctx, obj, opts...)
 }

From c92cfb53ac5b0e070bc7aaec02711140fd5a7da4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jacob=20Baung=C3=A5rd=20Hansen?=
Date: Tue, 14 May 2024 22:51:09 +0200
Subject: [PATCH 13/33] Tests: Don't check remote_write_requests on spokes
 (#1433)

* Tests: Don't check remote_write_requests on spokes

Previously the test "Should have acm_remote_write_requests_total
metrics with correct labels/value" looked for the metric for both the
hub cluster and any spokes. However, the spokes aren't supposed to
expose this metric, hence the test would always fail if any managed
clusters were added to the test setup.

Signed-off-by: Jacob Baungard Hansen

* Examples: remove `cleanupInterval`

This configuration option no longer exists, so removing from example
files used in tests.
Avoids the following warnings: ``` W0513 07:54:14.364463 11051 warnings.go:70] unknown field "spec.advanced.retentionConfig.cleanupInterval" ``` Signed-off-by: Jacob Baungard Hansen --------- Signed-off-by: Jacob Baungard Hansen --- .../v1beta2/custom-certs/observability.yaml | 1 - examples/export/v1beta2/observability.yaml | 1 - .../v1beta2/custom-certs/observability.yaml | 1 - examples/mco/e2e/v1beta2/observability.yaml | 1 - tests/pkg/tests/observability_export_test.go | 62 ++++++++++--------- 5 files changed, 34 insertions(+), 32 deletions(-) diff --git a/examples/export/v1beta2/custom-certs/observability.yaml b/examples/export/v1beta2/custom-certs/observability.yaml index b9ebf25fd..e31ccb65f 100644 --- a/examples/export/v1beta2/custom-certs/observability.yaml +++ b/examples/export/v1beta2/custom-certs/observability.yaml @@ -7,7 +7,6 @@ spec: advanced: retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index bc0a91d3d..0ba878961 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml @@ -7,7 +7,6 @@ spec: advanced: retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index 63b190eb0..923c33e32 100644 --- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -7,7 +7,6 @@ spec: advanced: retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index ab742dbeb..c4363d5f9 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -30,7 +30,6 @@ spec: advanced: retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d diff --git a/tests/pkg/tests/observability_export_test.go b/tests/pkg/tests/observability_export_test.go index 634a1a1b1..86364cc72 100644 --- a/tests/pkg/tests/observability_export_test.go +++ b/tests/pkg/tests/observability_export_test.go @@ -54,36 +54,42 @@ var _ = Describe("Observability:", func() { yamlB, )).NotTo(HaveOccurred()) + // Get name of the hub cluster + hubClusterName := "local-cluster" + for _, cluster := range testOptions.ManagedClusters { + if cluster.BaseDomain == testOptions.HubCluster.BaseDomain { + hubClusterName = cluster.Name + } + } + By("Waiting for metrics acm_remote_write_requests_total on grafana console") Eventually(func() error { - for _, cluster := range clusters { - query := fmt.Sprintf("acm_remote_write_requests_total{cluster=\"%s\"} offset 1m", cluster) - err, _ := utils.ContainManagedClusterMetric( - testOptions, - query, - []string{`"__name__":"acm_remote_write_requests_total"`}, - ) - if err != nil { - return err - } - err, _ = utils.ContainManagedClusterMetric( - testOptions, - query, - []string{`"__name__":"acm_remote_write_requests_total"`, - `"code":"200`, `"name":"thanos-receiver"`}, - ) - if err != nil { - return errors.New("metrics not forwarded to thanos-receiver") - } - err, _ = utils.ContainManagedClusterMetric( - testOptions, - query, - 
[]string{`"__name__":"acm_remote_write_requests_total"`, - `"code":"204`, `"name":"victoriametrics"`}, - ) - if err != nil { - return errors.New("metrics not forwarded to victoriametrics") - } + query := fmt.Sprintf("acm_remote_write_requests_total{cluster=\"%s\"} offset 1m", hubClusterName) + err, _ := utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`}, + ) + if err != nil { + return err + } + err, _ = utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`, + `"code":"200`, `"name":"thanos-receiver"`}, + ) + if err != nil { + return errors.New("metrics not forwarded to thanos-receiver") + } + err, _ = utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`, + `"code":"204`, `"name":"victoriametrics"`}, + ) + if err != nil { + return errors.New("metrics not forwarded to victoriametrics") } return nil }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) From fcc1b80f9176aad6251c6ea873611960fdf5d2ce Mon Sep 17 00:00:00 2001 From: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> Date: Wed, 15 May 2024 14:15:53 +0200 Subject: [PATCH 14/33] retry manifest work (#1434) Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --- .../controllers/placementrule/manifestwork.go | 58 ++++++++++--------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/operators/multiclusterobservability/controllers/placementrule/manifestwork.go b/operators/multiclusterobservability/controllers/placementrule/manifestwork.go index 5b72f1157..0ca19672b 100644 --- a/operators/multiclusterobservability/controllers/placementrule/manifestwork.go +++ b/operators/multiclusterobservability/controllers/placementrule/manifestwork.go @@ -18,6 +18,7 @@ import ( "golang.org/x/exp/slices" rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/client-go/util/retry" "gopkg.in/yaml.v2" appsv1 "k8s.io/api/apps/v1" @@ -162,14 +163,12 @@ func createManifestwork(c client.Client, work *workv1.ManifestWork) error { err = c.Create(context.TODO(), work) if err != nil { - log.Error(err, "Failed to create manifestwork", "namespace", namespace, "name", name) logSizeErrorDetails(fmt.Sprint(err), work) - return err + return fmt.Errorf("failed to create manifestwork %s/%s: %w", namespace, name, err) } return nil } else if err != nil { - log.Error(err, "Failed to check manifestwork", namespace, "name", name) - return err + return fmt.Errorf("failed to check manifestwork %s/%s: %w", namespace, name, err) } if found.GetDeletionTimestamp() != nil { @@ -177,33 +176,33 @@ func createManifestwork(c client.Client, work *workv1.ManifestWork) error { return errors.New("existing manifestwork is terminating, skip and reconcile later") } - manifests := work.Spec.Workload.Manifests - updated := false - if len(found.Spec.Workload.Manifests) == len(manifests) { - for i, m := range found.Spec.Workload.Manifests { - if !util.CompareObject(m.RawExtension, manifests[i].RawExtension) { - updated = true - break - } - } - } else { - updated = true + if !shouldUpdateManifestWork(work.Spec.Workload.Manifests, found.Spec.Workload.Manifests) { + log.Info("manifestwork already existed/unchanged", "namespace", namespace) + return nil } - if updated { - log.Info("Updating manifestwork", namespace, namespace, "name", name) - found.Spec.Workload.Manifests = manifests - err = c.Update(context.TODO(), found) - if err != nil { - log.Error(err, 
"Failed to update monitoring-endpoint-monitoring-work work") - logSizeErrorDetails(fmt.Sprint(err), work) - return err + log.Info("Updating manifestwork", "namespace", namespace, "name", name) + found.Spec.Workload.Manifests = work.Spec.Workload.Manifests + err = c.Update(context.TODO(), found) + if err != nil { + logSizeErrorDetails(fmt.Sprint(err), work) + return fmt.Errorf("failed to update manifestwork %s/%s: %w", namespace, name, err) + } + return nil +} + +func shouldUpdateManifestWork(desiredManifests []workv1.Manifest, foundManifests []workv1.Manifest) bool { + if len(desiredManifests) != len(foundManifests) { + return true + } + + for i, m := range desiredManifests { + if !util.CompareObject(m.RawExtension, foundManifests[i].RawExtension) { + return true } - return nil } - log.Info("manifestwork already existed/unchanged", "namespace", namespace) - return nil + return false } // generateGlobalManifestResources generates global resources, eg. manifestwork, @@ -458,7 +457,12 @@ func createManifestWorks( log.Info("Creating resource for hub metrics collection", "cluster", clusterName) err = createUpdateResourcesForHubMetricsCollection(c, manifests) } else { - err = createManifestwork(c, work) + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + return createManifestwork(c, work) + }) + if retryErr != nil { + return fmt.Errorf("failed to create manifestwork: %w", retryErr) + } } return err From 76325830dbf450721e1de7f857804269e0185e17 Mon Sep 17 00:00:00 2001 From: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> Date: Wed, 15 May 2024 16:04:42 +0200 Subject: [PATCH 15/33] [ACM-11093]: apply security context for microshift (#1422) * apply security restrictions Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * add privileged Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --------- Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --- .../kube-state-metrics-deployment.yaml | 25 +++++++++++++------ .../prometheus/node-exporter-clusterRole.yaml | 9 +++++++ .../prometheus/node-exporter-daemonset.yaml | 22 ++++++++++++---- 3 files changed, 44 insertions(+), 12 deletions(-) diff --git a/operators/endpointmetrics/manifests/prometheus/kube-state-metrics-deployment.yaml b/operators/endpointmetrics/manifests/prometheus/kube-state-metrics-deployment.yaml index 59dbee9b3..1702b1201 100644 --- a/operators/endpointmetrics/manifests/prometheus/kube-state-metrics-deployment.yaml +++ b/operators/endpointmetrics/manifests/prometheus/kube-state-metrics-deployment.yaml @@ -36,7 +36,10 @@ spec: cpu: 10m memory: 190Mi securityContext: - runAsUser: 65534 + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL - args: - --logtostderr - --secure-listen-address=:8443 @@ -55,9 +58,10 @@ spec: cpu: 20m memory: 20Mi securityContext: - runAsGroup: 65532 - runAsNonRoot: true - runAsUser: 65532 + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL - args: - --logtostderr - --secure-listen-address=:9443 @@ -76,9 +80,16 @@ spec: cpu: 10m memory: 20Mi securityContext: - runAsGroup: 65532 - runAsNonRoot: true - runAsUser: 65532 + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault nodeSelector: kubernetes.io/os: linux serviceAccountName: kube-state-metrics diff --git 
a/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRole.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRole.yaml index ad783ae9b..43c76d8b1 100644 --- a/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRole.yaml +++ b/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRole.yaml @@ -15,3 +15,12 @@ rules: - subjectaccessreviews verbs: - create +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - use + diff --git a/operators/endpointmetrics/manifests/prometheus/node-exporter-daemonset.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-daemonset.yaml index cc2bdc2f5..39c903973 100644 --- a/operators/endpointmetrics/manifests/prometheus/node-exporter-daemonset.yaml +++ b/operators/endpointmetrics/manifests/prometheus/node-exporter-daemonset.yaml @@ -36,6 +36,13 @@ spec: requests: cpu: 102m memory: 200Mi + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL volumeMounts: - mountPath: /host/sys mountPropagation: HostToContainer @@ -59,7 +66,6 @@ spec: name: kube-rbac-proxy ports: - containerPort: 9100 - hostPort: 9100 name: https resources: limits: @@ -69,16 +75,22 @@ spec: cpu: 10m memory: 200Mi securityContext: - runAsGroup: 65532 - runAsNonRoot: true - runAsUser: 65532 + runAsUser: 65534 + runAsGroup: 65534 + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL hostNetwork: true hostPID: true nodeSelector: kubernetes.io/os: linux securityContext: + privileged: false + readOnlyRootFilesystem: true runAsNonRoot: true - runAsUser: 65534 + seccompProfile: + type: RuntimeDefault serviceAccountName: node-exporter tolerations: - operator: Exists From 2c9cd9ebc2d79c1b613aa38ddba4b977cb229038 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jacob=20Baung=C3=A5rd=20Hansen?= Date: Thu, 16 May 2024 11:04:39 +0200 Subject: [PATCH 16/33] Add `cluster:node_cpu:ratio` to allowlist (#1409) We add this metric to the allowlist as it will be used to optimize dashboard performance for the fleet wide CPU widgets. 
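A fleet-wide CPU widget can then read the precomputed ratio directly
instead of aggregating raw per-node CPU samples across every cluster; a
hypothetical panel query (the `$cluster` template variable is
illustrative):

```
cluster:node_cpu:ratio{cluster=~"$cluster"}
```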
Signed-off-by: Jacob Baungard Hansen --- .../manifests/base/config/metrics_allowlist.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml b/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml index 9b18230e4..63497ea4d 100644 --- a/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml +++ b/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml @@ -17,6 +17,7 @@ data: - cluster:cpu_usage_cores:sum - cluster:memory_usage:ratio - cluster:memory_usage_bytes:sum + - cluster:node_cpu:ratio - cluster:usage:resources:sum - cluster_infrastructure_provider - cluster_version @@ -402,4 +403,4 @@ data: expr: histogram_quantile(0.99,sum(rate(apiserver_request_duration_seconds_bucket{job=\"apiserver\", verb!=\"WATCH\", _id!=\"\"}[5m])) by (le, _id)) - record: sum:apiserver_request_total:1h expr: sum(rate(apiserver_request_total{job=\"apiserver\", _id!=\"\"}[1h])) by(code, instance, _id) - collect_rules: [] \ No newline at end of file + collect_rules: [] From caf27f33b2c2968f55eec6030e0522956d40177d Mon Sep 17 00:00:00 2001 From: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> Date: Thu, 16 May 2024 13:25:23 +0200 Subject: [PATCH 17/33] handle update of missing kinds (#1426) Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --- .../observabilityaddon_controller.go | 2 +- .../multiclusterobservability_controller.go | 2 +- operators/pkg/deploying/deployer.go | 299 +++++++++--------- operators/pkg/deploying/deployer_test.go | 254 ++++++++++++++- 4 files changed, 407 insertions(+), 150 deletions(-) diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go index 7f3d1a4b5..e242cf538 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go @@ -267,7 +267,7 @@ func (r *ObservabilityAddonReconciler) Reconcile(ctx context.Context, req ctrl.R } } - if err := deployer.Deploy(res); err != nil { + if err := deployer.Deploy(ctx, res); err != nil { return ctrl.Result{}, fmt.Errorf("failed to deploy %s %s/%s: %w", res.GetKind(), namespace, res.GetName(), err) } } diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go index dfc499a71..d877e29ea 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go @@ -275,7 +275,7 @@ func (r *MultiClusterObservabilityReconciler) Reconcile(ctx context.Context, req return ctrl.Result{}, err } } - if err := deployer.Deploy(res); err != nil { + if err := deployer.Deploy(ctx, res); err != nil { reqLogger.Error(err, fmt.Sprintf("Failed to deploy %s %s/%s", res.GetKind(), resNS, res.GetName())) return ctrl.Result{}, err diff --git a/operators/pkg/deploying/deployer.go b/operators/pkg/deploying/deployer.go index f71d64a4d..cae21c867 100644 --- a/operators/pkg/deploying/deployer.go +++ 
b/operators/pkg/deploying/deployer.go @@ -28,7 +28,7 @@ import ( var log = logf.Log.WithName("deploying") -type deployerFn func(*unstructured.Unstructured, *unstructured.Unstructured) error +type deployerFn func(context.Context, *unstructured.Unstructured, *unstructured.Unstructured) error // Deployer is used create or update the resources. type Deployer struct { @@ -51,23 +51,29 @@ func NewDeployer(client client.Client) *Deployer { "Prometheus": deployer.updatePrometheus, "PrometheusRule": deployer.updatePrometheusRule, "Ingress": deployer.updateIngress, + "Role": deployer.updateRole, + "RoleBinding": deployer.updateRoleBinding, + "ServiceAccount": deployer.updateServiceAccount, + "DaemonSet": deployer.updateDaemonSet, + "ServiceMonitor": deployer.updateServiceMonitor, } return deployer } // Deploy is used to create or update the resources. -func (d *Deployer) Deploy(obj *unstructured.Unstructured) error { +func (d *Deployer) Deploy(ctx context.Context, obj *unstructured.Unstructured) error { + // Create the resource if it doesn't exist found := &unstructured.Unstructured{} found.SetGroupVersionKind(obj.GroupVersionKind()) err := d.client.Get( - context.TODO(), + ctx, types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, found, ) if err != nil { if errors.IsNotFound(err) { log.Info("Create", "Kind", obj.GroupVersionKind(), "Name", obj.GetName()) - return d.client.Create(context.TODO(), obj) + return d.client.Create(ctx, obj) } return err } @@ -84,51 +90,34 @@ func (d *Deployer) Deploy(obj *unstructured.Unstructured) error { } } + // The resource exists, update it deployerFn, ok := d.deployerFns[found.GetKind()] if ok { - return deployerFn(obj, found) + return deployerFn(ctx, obj, found) } else { log.Info("deployerFn not found", "kind", found.GetKind()) } return nil } -func (d *Deployer) updateDeployment(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeDepoly := &appsv1.Deployment{} - err := json.Unmarshal(runtimeJSON, runtimeDepoly) +func (d *Deployer) updateDeployment(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredDeploy, runtimeDepoly, err := unstructuredPairToTyped[appsv1.Deployment](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Deployment %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredDepoly := &appsv1.Deployment{} - err = json.Unmarshal(desiredJSON, desiredDepoly) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal Deployment %s", runtimeObj.GetName())) + return err } - if !apiequality.Semantic.DeepDerivative(desiredDepoly.Spec, runtimeDepoly.Spec) { + if !apiequality.Semantic.DeepDerivative(desiredDeploy.Spec, runtimeDepoly.Spec) { logUpdateInfo(runtimeObj) - return d.client.Update(context.TODO(), desiredDepoly) + return d.client.Update(ctx, desiredDeploy) } return nil } -func (d *Deployer) updateStatefulSet(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeDepoly := &appsv1.StatefulSet{} - err := json.Unmarshal(runtimeJSON, runtimeDepoly) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime StatefulSet %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredDepoly := &appsv1.StatefulSet{} - err = json.Unmarshal(desiredJSON, desiredDepoly) +func (d *Deployer) updateStatefulSet(ctx context.Context, desiredObj, runtimeObj 
*unstructured.Unstructured) error { + desiredDepoly, runtimeDepoly, err := unstructuredPairToTyped[appsv1.StatefulSet](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal StatefulSet %s", runtimeObj.GetName())) + return err } if !apiequality.Semantic.DeepDerivative(desiredDepoly.Spec.Template, runtimeDepoly.Spec.Template) || @@ -136,166 +125,104 @@ func (d *Deployer) updateStatefulSet(desiredObj, runtimeObj *unstructured.Unstru logUpdateInfo(runtimeObj) runtimeDepoly.Spec.Replicas = desiredDepoly.Spec.Replicas runtimeDepoly.Spec.Template = desiredDepoly.Spec.Template - return d.client.Update(context.TODO(), runtimeDepoly) + return d.client.Update(ctx, runtimeDepoly) } return nil } -func (d *Deployer) updateService(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeService := &corev1.Service{} - err := json.Unmarshal(runtimeJSON, runtimeService) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Service %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredService := &corev1.Service{} - err = json.Unmarshal(desiredJSON, desiredService) +func (d *Deployer) updateService(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredService, runtimeService, err := unstructuredPairToTyped[corev1.Service](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal Service %s", runtimeObj.GetName())) + return err } if !apiequality.Semantic.DeepDerivative(desiredService.Spec, runtimeService.Spec) { desiredService.ObjectMeta.ResourceVersion = runtimeService.ObjectMeta.ResourceVersion desiredService.Spec.ClusterIP = runtimeService.Spec.ClusterIP logUpdateInfo(runtimeObj) - return d.client.Update(context.TODO(), desiredService) + return d.client.Update(ctx, desiredService) } return nil } -func (d *Deployer) updateConfigMap(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeConfigMap := &corev1.ConfigMap{} - err := json.Unmarshal(runtimeJSON, runtimeConfigMap) +func (d *Deployer) updateConfigMap(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredConfigMap, runtimeConfigMap, err := unstructuredPairToTyped[corev1.ConfigMap](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime ConfigMap %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredConfigMap := &corev1.ConfigMap{} - err = json.Unmarshal(desiredJSON, desiredConfigMap) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal ConfigMap %s", runtimeObj.GetName())) + return err } if !apiequality.Semantic.DeepDerivative(desiredConfigMap.Data, runtimeConfigMap.Data) { logUpdateInfo(runtimeObj) - return d.client.Update(context.TODO(), desiredConfigMap) + return d.client.Update(ctx, desiredConfigMap) } return nil } -func (d *Deployer) updateSecret(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeSecret := &corev1.Secret{} - err := json.Unmarshal(runtimeJSON, runtimeSecret) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Secret %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredSecret := &corev1.Secret{} - err = json.Unmarshal(desiredJSON, desiredSecret) +func (d *Deployer) updateSecret(ctx context.Context, desiredObj, runtimeObj 
*unstructured.Unstructured) error { + desiredSecret, runtimeSecret, err := unstructuredPairToTyped[corev1.Secret](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal desired Secret %s", desiredObj.GetName())) + return err } if desiredSecret.Data == nil || !apiequality.Semantic.DeepDerivative(desiredSecret.Data, runtimeSecret.Data) { logUpdateInfo(desiredObj) - return d.client.Update(context.TODO(), desiredSecret) + return d.client.Update(ctx, desiredSecret) } return nil } -func (d *Deployer) updateClusterRole(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeClusterRole := &rbacv1.ClusterRole{} - err := json.Unmarshal(runtimeJSON, runtimeClusterRole) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime ClusterRole %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredClusterRole := &rbacv1.ClusterRole{} - err = json.Unmarshal(desiredJSON, desiredClusterRole) +func (d *Deployer) updateClusterRole(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredClusterRole, runtimeClusterRole, err := unstructuredPairToTyped[rbacv1.ClusterRole](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal desired ClusterRole %s", desiredObj.GetName())) + return err } if !apiequality.Semantic.DeepDerivative(desiredClusterRole.Rules, runtimeClusterRole.Rules) || !apiequality.Semantic.DeepDerivative(desiredClusterRole.AggregationRule, runtimeClusterRole.AggregationRule) { logUpdateInfo(desiredObj) - return d.client.Update(context.TODO(), desiredClusterRole) + return d.client.Update(ctx, desiredClusterRole) } return nil } -func (d *Deployer) updateClusterRoleBinding(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeClusterRoleBinding := &rbacv1.ClusterRoleBinding{} - err := json.Unmarshal(runtimeJSON, runtimeClusterRoleBinding) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime ClusterRoleBinding %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredClusterRoleBinding := &rbacv1.ClusterRoleBinding{} - err = json.Unmarshal(desiredJSON, desiredClusterRoleBinding) +func (d *Deployer) updateClusterRoleBinding(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredClusterRoleBinding, runtimeClusterRoleBinding, err := unstructuredPairToTyped[rbacv1.ClusterRoleBinding](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal desired ClusterRoleBinding %s", desiredObj.GetName())) + return err } if !apiequality.Semantic.DeepDerivative(desiredClusterRoleBinding.Subjects, runtimeClusterRoleBinding.Subjects) || !apiequality.Semantic.DeepDerivative(desiredClusterRoleBinding.RoleRef, runtimeClusterRoleBinding.RoleRef) { logUpdateInfo(desiredObj) - return d.client.Update(context.TODO(), desiredClusterRoleBinding) + return d.client.Update(ctx, desiredClusterRoleBinding) } return nil } -func (d *Deployer) updateCRD(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeCRD := &apiextensionsv1.CustomResourceDefinition{} - err := json.Unmarshal(runtimeJSON, runtimeCRD) +func (d *Deployer) updateCRD(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredCRD, runtimeCRD, err := 
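Since every updater in this file gates its client.Update call on apiequality.Semantic.DeepDerivative, its semantics are worth spelling out: nil or unset fields in the first (desired) argument count as "no opinion", not as a difference, which is what keeps specs that omit server-defaulted fields from causing endless update loops. A standalone illustration against the real apimachinery helper; the spec struct itself is invented:

package main

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

// spec is a made-up struct standing in for a Kubernetes object spec.
type spec struct {
	Replicas *int32
	Image    string
}

func main() {
	three := int32(3)
	live := spec{Replicas: &three, Image: "quay.io/example:v1"}

	// Desired leaves Replicas nil: DeepDerivative ignores the unset field.
	desired := spec{Image: "quay.io/example:v1"}
	fmt.Println(apiequality.Semantic.DeepDerivative(desired, live)) // true -> no update issued

	// A set-but-different field is real drift and would trigger an update.
	desired.Image = "quay.io/example:v2"
	fmt.Println(apiequality.Semantic.DeepDerivative(desired, live)) // false
}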
unstructuredPairToTyped[apiextensionsv1.CustomResourceDefinition](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime CRD %s", runtimeObj.GetName())) + return err } - desiredJSON, _ := desiredObj.MarshalJSON() - desiredCRD := &apiextensionsv1.CustomResourceDefinition{} - err = json.Unmarshal(desiredJSON, desiredCRD) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal CRD %s", runtimeObj.GetName())) - } desiredCRD.ObjectMeta.ResourceVersion = runtimeCRD.ObjectMeta.ResourceVersion if !apiequality.Semantic.DeepDerivative(desiredCRD.Spec, runtimeCRD.Spec) { logUpdateInfo(runtimeObj) - return d.client.Update(context.TODO(), desiredCRD) + return d.client.Update(ctx, desiredCRD) } return nil } -func (d *Deployer) updatePrometheus(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimePrometheus := &prometheusv1.Prometheus{} - err := json.Unmarshal(runtimeJSON, runtimePrometheus) +func (d *Deployer) updatePrometheus(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredPrometheus, runtimePrometheus, err := unstructuredPairToTyped[prometheusv1.Prometheus](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Prometheus %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredPrometheus := &prometheusv1.Prometheus{} - err = json.Unmarshal(desiredJSON, desiredPrometheus) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal Prometheus %s", runtimeObj.GetName())) + return err } // On GKE clusters, it was observed that the runtime object was not in sync with the object attributes @@ -326,26 +253,17 @@ func (d *Deployer) updatePrometheus(desiredObj, runtimeObj *unstructured.Unstruc if !apiequality.Semantic.DeepDerivative(desiredPrometheus.Spec, runtimePrometheus.Spec) { logUpdateInfo(runtimeObj) - return d.client.Update(context.TODO(), desiredPrometheus) + return d.client.Update(ctx, desiredPrometheus) } else { log.Info("Runtime Prometheus and Desired Prometheus are semantically equal!") } return nil } -func (d *Deployer) updatePrometheusRule(desiredObj, runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimePrometheusRule := &prometheusv1.PrometheusRule{} - err := json.Unmarshal(runtimeJSON, runtimePrometheusRule) - if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime PrometheusRule %s", runtimeObj.GetName())) - } - - desiredJSON, _ := desiredObj.MarshalJSON() - desiredPrometheusRule := &prometheusv1.PrometheusRule{} - err = json.Unmarshal(desiredJSON, desiredPrometheusRule) +func (d *Deployer) updatePrometheusRule(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredPrometheusRule, runtimePrometheusRule, err := unstructuredPairToTyped[prometheusv1.PrometheusRule](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal PrometheusRule %s", runtimeObj.GetName())) + return err } if !apiequality.Semantic.DeepDerivative(desiredPrometheusRule.Spec, runtimePrometheusRule.Spec) { @@ -354,34 +272,123 @@ func (d *Deployer) updatePrometheusRule(desiredObj, runtimeObj *unstructured.Uns desiredPrometheusRule.ResourceVersion = runtimePrometheusRule.ResourceVersion } - return d.client.Update(context.TODO(), desiredPrometheusRule) + return d.client.Update(ctx, desiredPrometheusRule) } return nil } -func (d *Deployer) updateIngress(desiredObj, 
runtimeObj *unstructured.Unstructured) error { - runtimeJSON, _ := runtimeObj.MarshalJSON() - runtimeIngress := &networkingv1.Ingress{} - err := json.Unmarshal(runtimeJSON, runtimeIngress) +func (d *Deployer) updateIngress(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredIngress, runtimeIngress, err := unstructuredPairToTyped[networkingv1.Ingress](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Ingress %s", runtimeObj.GetName())) + return err + } + + if !apiequality.Semantic.DeepDerivative(desiredIngress.Spec, runtimeIngress.Spec) { + logUpdateInfo(runtimeObj) + return d.client.Update(ctx, desiredIngress) } - desiredJSON, _ := desiredObj.MarshalJSON() - desiredIngress := &networkingv1.Ingress{} - err = json.Unmarshal(desiredJSON, desiredIngress) + return nil +} + +func (d *Deployer) updateRole(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredRole, runtimeRole, err := unstructuredPairToTyped[rbacv1.Role](desiredObj, runtimeObj) if err != nil { - log.Error(err, fmt.Sprintf("Failed to Unmarshal Ingress %s", runtimeObj.GetName())) + return err } - if !apiequality.Semantic.DeepDerivative(desiredIngress.Spec, runtimeIngress.Spec) { + if !apiequality.Semantic.DeepDerivative(desiredRole.Rules, runtimeRole.Rules) { + logUpdateInfo(runtimeObj) + return d.client.Update(ctx, desiredRole) + } + + return nil +} + +func (d *Deployer) updateRoleBinding(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredRoleBinding, runtimeRoleBinding, err := unstructuredPairToTyped[rbacv1.RoleBinding](desiredObj, runtimeObj) + if err != nil { + return err + } + + if !apiequality.Semantic.DeepDerivative(desiredRoleBinding.Subjects, runtimeRoleBinding.Subjects) || + !apiequality.Semantic.DeepDerivative(desiredRoleBinding.RoleRef, runtimeRoleBinding.RoleRef) { + logUpdateInfo(runtimeObj) + return d.client.Update(ctx, desiredRoleBinding) + } + + return nil +} + +func (d *Deployer) updateServiceAccount(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredServiceAccount, runtimeServiceAccount, err := unstructuredPairToTyped[corev1.ServiceAccount](desiredObj, runtimeObj) + if err != nil { + return err + } + + if !apiequality.Semantic.DeepDerivative(desiredServiceAccount.ImagePullSecrets, runtimeServiceAccount.ImagePullSecrets) || + !apiequality.Semantic.DeepDerivative(desiredServiceAccount.Secrets, runtimeServiceAccount.Secrets) { + logUpdateInfo(runtimeObj) + return d.client.Update(ctx, desiredServiceAccount) + } + + return nil +} + +func (d *Deployer) updateDaemonSet(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredDaemonSet, runtimeDaemonSet, err := unstructuredPairToTyped[appsv1.DaemonSet](desiredObj, runtimeObj) + if err != nil { + return err + } + + if !apiequality.Semantic.DeepDerivative(desiredDaemonSet.Spec, runtimeDaemonSet.Spec) { logUpdateInfo(runtimeObj) - return d.client.Update(context.TODO(), desiredIngress) + return d.client.Update(ctx, desiredDaemonSet) } return nil } +func (d *Deployer) updateServiceMonitor(ctx context.Context, desiredObj, runtimeObj *unstructured.Unstructured) error { + desiredServiceMonitor, runtimeServiceMonitor, err := unstructuredPairToTyped[prometheusv1.ServiceMonitor](desiredObj, runtimeObj) + if err != nil { + return err + } + + if !apiequality.Semantic.DeepDerivative(desiredServiceMonitor.Spec, runtimeServiceMonitor.Spec) { + 
logUpdateInfo(runtimeObj) + return d.client.Update(ctx, desiredServiceMonitor) + } + + return nil +} + +// unstructuredToType converts an unstructured.Unstructured object to a specified type. +// It marshals the object to JSON and then unmarshals it into the target type. +// The target parameter must be a pointer to the type T. +func unstructuredToType[T any](obj *unstructured.Unstructured, target T) error { + jsonData, err := obj.MarshalJSON() + if err != nil { + return err + } + return json.Unmarshal(jsonData, target) +} + +// unstructuredPairToTyped converts a pair of unstructured.Unstructured objects to a specified type. +func unstructuredPairToTyped[T any](obja, objb *unstructured.Unstructured) (*T, *T, error) { + a := new(T) + if err := unstructuredToType(obja, a); err != nil { + return nil, nil, fmt.Errorf("failed to convert obja %s/%s/%s: %w", obja.GetKind(), obja.GetNamespace(), obja.GetName(), err) + } + + b := new(T) + if err := unstructuredToType(objb, b); err != nil { + return nil, nil, fmt.Errorf("failed to convert objb %s/%s/%s: %w", obja.GetKind(), obja.GetNamespace(), obja.GetName(), err) + } + + return a, b, nil +} + func logUpdateInfo(obj *unstructured.Unstructured) { log.Info("Update", "kind", obj.GroupVersionKind().Kind, "kindVersion", obj.GroupVersionKind().Version, "name", obj.GetName()) } diff --git a/operators/pkg/deploying/deployer_test.go b/operators/pkg/deploying/deployer_test.go index 1f68300ae..021a390f2 100644 --- a/operators/pkg/deploying/deployer_test.go +++ b/operators/pkg/deploying/deployer_test.go @@ -532,6 +532,256 @@ func TestDeploy(t *testing.T) { } }, }, + { + name: "create and update a role", + createObj: &rbacv1.Role{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "Role", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "ns1", + }, + Rules: []rbacv1.PolicyRule{ + { + Resources: []string{ + "pods", + }, + Verbs: []string{ + "watch", + }, + APIGroups: []string{ + "", + }, + }, + }, + }, + updateObj: &rbacv1.Role{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "Role", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-role", + Namespace: "ns1", + ResourceVersion: "1", + }, + Rules: []rbacv1.PolicyRule{ + { + Resources: []string{ + "pods", + }, + Verbs: []string{ + "watch", + "list", + "get", + }, + APIGroups: []string{ + "", + }, + }, + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-role", + Namespace: "ns1", + } + obj := &rbacv1.Role{} + client.Get(context.Background(), namespacedName, obj) + + if len(obj.Rules[0].Verbs) != 3 { + t.Fatalf("fail to update the role") + } + }, + }, + { + name: "create and update a rolebinding", + createObj: &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "RoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "ns1", + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: "test-role", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "test-sa", + Namespace: "ns1", + }, + }, + }, + updateObj: &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "RoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rolebinding", + Namespace: "ns1", + ResourceVersion: "1", + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: "test-role", + APIGroup: 
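The two generic helpers above replace roughly ten hand-rolled marshal/unmarshal pairs, and they turn conversion failures into returned errors instead of log lines that let a half-converted object continue. A usage sketch, with the helper restated (slightly compressed) so the snippet compiles on its own; the Deployment literal is made up:

package main

import (
	"encoding/json"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// Restated from the patch above: convert a pair of unstructured objects
// into typed ones via a JSON round trip.
func unstructuredPairToTyped[T any](obja, objb *unstructured.Unstructured) (*T, *T, error) {
	a, b := new(T), new(T)
	for _, p := range []struct {
		src *unstructured.Unstructured
		dst *T
	}{{obja, a}, {objb, b}} {
		raw, err := p.src.MarshalJSON()
		if err != nil {
			return nil, nil, err
		}
		if err := json.Unmarshal(raw, p.dst); err != nil {
			return nil, nil, err
		}
	}
	return a, b, nil
}

func main() {
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata":   map[string]interface{}{"name": "example", "namespace": "default"},
	}}
	desired, live, err := unstructuredPairToTyped[appsv1.Deployment](u, u)
	if err != nil {
		panic(err)
	}
	fmt.Println(desired.Name, live.Namespace) // example default
}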
"rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "test-sa", + Namespace: "ns1", + }, + { + Kind: "User", + Name: "test-user", + Namespace: "ns1", + }, + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-rolebinding", + Namespace: "ns1", + } + obj := &rbacv1.RoleBinding{} + client.Get(context.Background(), namespacedName, obj) + + if len(obj.Subjects) != 2 { + t.Fatalf("fail to update the rolebinding") + } + }, + }, + { + name: "create and update serviceaccount", + createObj: &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "ns1", + }, + }, + updateObj: &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "ns1", + ResourceVersion: "1", + }, + ImagePullSecrets: []corev1.LocalObjectReference{ + { + Name: "test-secret", + }, + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-sa", + Namespace: "ns1", + } + obj := &corev1.ServiceAccount{} + client.Get(context.Background(), namespacedName, obj) + + if len(obj.ImagePullSecrets) == 0 { + t.Fatalf("fail to update the serviceaccount") + } + }, + }, + { + name: "create and update daemonset", + createObj: &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "DaemonSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-daemonset", + Namespace: "ns1", + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "myApp", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "myApp", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "test-image", + }, + }, + }, + }, + }, + }, + updateObj: &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "DaemonSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-daemonset", + Namespace: "ns1", + ResourceVersion: "1", + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "myApp", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "myApp", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "test-image:latest", + }, + }, + }, + }, + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-daemonset", + Namespace: "ns1", + } + obj := &appsv1.DaemonSet{} + client.Get(context.Background(), namespacedName, obj) + + if obj.Spec.Template.Spec.Containers[0].Image != "test-image:latest" { + t.Fatalf("fail to update the daemonset") + } + }, + }, } scheme := runtime.NewScheme() @@ -548,13 +798,13 @@ func TestDeploy(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { createObjUns, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(c.createObj) - err := deployer.Deploy(&unstructured.Unstructured{Object: createObjUns}) + err := deployer.Deploy(context.Background(), &unstructured.Unstructured{Object: createObjUns}) if err != nil { t.Fatalf("Cannot create the resource %v", err) } if c.updateObj != nil { updateObjUns, _ := 
runtime.DefaultUnstructuredConverter.ToUnstructured(c.updateObj) - err = deployer.Deploy(&unstructured.Unstructured{Object: updateObjUns}) + err = deployer.Deploy(context.Background(), &unstructured.Unstructured{Object: updateObjUns}) if err != nil { t.Fatalf("Cannot update the resource %v", err) } From a0174e835d4d82e81d2176e4f9e83507d08e43a4 Mon Sep 17 00:00:00 2001 From: Subbarao Meduri Date: Thu, 16 May 2024 08:24:42 -0400 Subject: [PATCH 18/33] relocate kubeconfig to /workspace for non-root user access (#1437) Signed-off-by: Subbarao Meduri --- tests/Containerfile.operator | 8 ++++++-- tests/Dockerfile | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/Containerfile.operator b/tests/Containerfile.operator index 70b47f4e0..451994ee0 100644 --- a/tests/Containerfile.operator +++ b/tests/Containerfile.operator @@ -14,12 +14,16 @@ RUN go install github.com/onsi/ginkgo/ginkgo@v1.14.2 && go mod vendor && ginkgo # create new docker image to hold built artifacts FROM registry.access.redhat.com/ubi8/ubi-minimal:latest +# pre-create directories and set permissions +RUN mkdir -p /resources /results && \ + chown -R 1001:1001 /resources /results + # run as non-root USER 1001:1001 # expose env vars for runtime -ENV KUBECONFIG "/opt/.kube/config" -ENV IMPORT_KUBECONFIG "/opt/.kube/import-kubeconfig" +ENV KUBECONFIG "/workspace/.kube/config" +ENV IMPORT_KUBECONFIG "/workspace/.kube/import-kubeconfig" ENV OPTIONS "/resources/options.yaml" ENV REPORT_FILE "/results/results.xml" ENV GINKGO_DEFAULT_FLAGS "-slowSpecThreshold=120 -timeout 7200s" diff --git a/tests/Dockerfile b/tests/Dockerfile index b1fe66e9a..21845fc39 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -11,12 +11,16 @@ RUN go install github.com/onsi/ginkgo/ginkgo@v1.14.2 && go mod vendor && ginkgo # create new docker image to hold built artifacts FROM registry.access.redhat.com/ubi8/ubi-minimal:latest +# pre-create directories and set permissions +RUN mkdir -p /resources /results && \ + chown -R 1001:1001 /resources /results + # run as non-root USER 1001:1001 # expose env vars for runtime -ENV KUBECONFIG "/opt/.kube/config" -ENV IMPORT_KUBECONFIG "/opt/.kube/import-kubeconfig" +ENV KUBECONFIG "/workspace/.kube/config" +ENV IMPORT_KUBECONFIG "/workspace/.kube/import-kubeconfig" ENV OPTIONS "/resources/options.yaml" ENV REPORT_FILE "/results/results.xml" ENV GINKGO_DEFAULT_FLAGS "-slowSpecThreshold=120 -timeout 7200s" From 0bf59b1ef26b24c945c5c3f1999a61c7642adf0c Mon Sep 17 00:00:00 2001 From: Douglas Camata <159076+douglascamata@users.noreply.github.com> Date: Sat, 18 May 2024 11:46:31 +0200 Subject: [PATCH 19/33] Upgrade to Go 1.21 (#1440) Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --- cicd-scripts/run-e2e-tests.sh | 6 +-- collectors/metrics/Containerfile.operator | 2 +- collectors/metrics/Dockerfile | 2 +- go.mod | 2 +- go.sum | 39 +++++++++++++++++++ loaders/dashboards/Containerfile.operator | 2 +- loaders/dashboards/Dockerfile | 2 +- .../endpointmetrics/Containerfile.operator | 2 +- operators/endpointmetrics/Dockerfile | 2 +- .../Containerfile.operator | 2 +- .../multiclusterobservability/Dockerfile | 2 +- proxy/Containerfile.operator | 2 +- proxy/Dockerfile | 2 +- tests/Containerfile.operator | 2 +- tests/Dockerfile | 2 +- .../alert-forward/Containerfile.operator | 3 +- tools/simulator/alert-forward/Dockerfile | 3 +- .../metrics-extractor/Containerfile.operator | 2 +- .../metrics-extractor/Dockerfile | 2 +- 19 files changed, 59 insertions(+), 22 deletions(-) diff 
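On the Go 1.21 bump itself: besides the builder-image updates below, raising the go directive makes the 1.21 standard library available to the module, e.g. the newly promoted slices package and the min/max builtins. A trivial, illustrative snippet (not code from this repository):

package main

import (
	"fmt"
	"slices"
)

func main() {
	kinds := []string{"Role", "DaemonSet", "ServiceAccount"}
	slices.Sort(kinds) // stdlib as of Go 1.21
	fmt.Println(kinds, min(3, 7), max(3, 7))
}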
--git a/cicd-scripts/run-e2e-tests.sh b/cicd-scripts/run-e2e-tests.sh index 282bfe208..d9bb22e7c 100755 --- a/cicd-scripts/run-e2e-tests.sh +++ b/cicd-scripts/run-e2e-tests.sh @@ -92,13 +92,13 @@ if command -v ginkgo &>/dev/null; then else # just for Prow KinD vm # uninstall old go version(1.16) and install new version - wget -nv https://go.dev/dl/go1.20.4.linux-amd64.tar.gz + wget -nv https://go.dev/dl/go1.21.10.linux-amd64.tar.gz if command -v sudo >/dev/null 2>&1; then sudo rm -fr /usr/local/go - sudo tar -C /usr/local -xzf go1.20.4.linux-amd64.tar.gz + sudo tar -C /usr/local -xzf go1.21.10.linux-amd64.tar.gz # else # rm -fr /usr/local/go - # tar -C /usr/local -xzf go1.20.4.linux-amd64.tar.gz + # tar -C /usr/local -xzf go1.21.10.linux-amd64.tar.gz fi go install github.com/onsi/ginkgo/ginkgo@latest GINKGO_CMD="$(go env GOPATH)/bin/ginkgo" diff --git a/collectors/metrics/Containerfile.operator b/collectors/metrics/Containerfile.operator index ba66e9aa2..e1290ec03 100644 --- a/collectors/metrics/Containerfile.operator +++ b/collectors/metrics/Containerfile.operator @@ -1,7 +1,7 @@ # Copyright Contributors to the Open Cluster Management project # Licensed under the Apache License 2.0 -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.20 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.21 AS builder WORKDIR /workspace COPY go.sum go.mod ./ diff --git a/collectors/metrics/Dockerfile b/collectors/metrics/Dockerfile index ab96373c7..05bfe8155 100644 --- a/collectors/metrics/Dockerfile +++ b/collectors/metrics/Dockerfile @@ -1,6 +1,6 @@ # Copyright Contributors to the Open Cluster Management project -FROM registry.ci.openshift.org/stolostron/builder:go1.20-linux AS builder +FROM registry.ci.openshift.org/stolostron/builder:go1.21-linux AS builder WORKDIR /workspace COPY go.sum go.mod ./ diff --git a/go.mod b/go.mod index eca96f186..67ac77107 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/stolostron/multicluster-observability-operator -go 1.20 +go 1.21 require ( github.com/IBM/controller-filtered-cache v0.3.6 diff --git a/go.sum b/go.sum index 889318239..6afdecdfb 100644 --- a/go.sum +++ b/go.sum @@ -338,6 +338,7 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/Azure/go-ntlmssp v0.0.0-20211209120228-48547f28849e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -353,10 +354,15 @@ github.com/Jeffail/gabs/v2 v2.6.1/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= 
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= @@ -407,6 +413,7 @@ github.com/armon/go-metrics v0.3.6/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4 github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= +github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -519,6 +526,7 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190531201743-edce55837238/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -566,6 +574,7 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= @@ -598,10 +607,12 @@ github.com/docker/distribution 
v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= +github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= @@ -652,10 +663,12 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.6.13/go.mod h1:qEySVqXrEugbHKvmhI8ZqtQi75/RHSSRNpffvB4I6Bw= github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -672,6 +685,7 @@ github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -909,6 +923,7 @@ github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGt github.com/gobuffalo/packr/v2 
v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -938,6 +953,7 @@ github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keL github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -1163,6 +1179,7 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= @@ -1182,6 +1199,7 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -1352,6 +1370,7 @@ github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy 
v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= @@ -1365,6 +1384,7 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -1409,6 +1429,7 @@ github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1429,6 +1450,7 @@ github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= @@ -1511,6 +1533,7 @@ github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzm github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1541,6 +1564,7 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.1.0-rc2 
h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openshift/api v0.0.0-20230915112357-693d4b64813c h1:ro/BvvpAikMoZc/fsxJN6jxmK+4uIbdNIK9nwaFQ5xo= github.com/openshift/api v0.0.0-20230915112357-693d4b64813c/go.mod h1:NFgA+laiQtptmjsp1trDnGqjV62nYzlUfQ6P5I9oqXA= @@ -1570,6 +1594,7 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/operator-framework/api v0.15.0 h1:4f9i0drtqHj7ykLoHxv92GR43S7MmQHhmFQkfm5YaGI= +github.com/operator-framework/api v0.15.0/go.mod h1:scnY9xqSeCsOdtJtNoHIXd7OtHZ14gj1hkDA4+DlgLY= github.com/ovh/go-ovh v1.1.0 h1:bHXZmw8nTgZin4Nv7JuaLs0KG5x54EQR7migYTd1zrk= github.com/ovh/go-ovh v1.1.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= @@ -1725,9 +1750,11 @@ github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJP github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shoenig/test v0.4.3/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -1744,6 +1771,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1763,6 +1791,7 @@ github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcD github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast 
v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1785,11 +1814,13 @@ github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5q github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stolostron/backplane-operator v0.0.0-20220727154840-1f60baf1fb98 h1:fb77iXzaY4kud+wPqNPT6UgvPITK9q+O1D5FeUJ6qP0= +github.com/stolostron/backplane-operator v0.0.0-20220727154840-1f60baf1fb98/go.mod h1:IGZxghtPz8rJylGtW8XUAQdlqRai2j7aL4ymOINsP/c= github.com/stolostron/multiclusterhub-operator v0.0.0-20220902185016-e81ccfbecf55 h1:sNpuRgbyAEvOjayzShyPNt+Eg34jmJPNIUY9cFvUlwU= github.com/stolostron/multiclusterhub-operator v0.0.0-20220902185016-e81ccfbecf55/go.mod h1:YCJavcWI4f3PV/LbgMNWsYl/oCbH/Fbn4p+Epd9gro0= github.com/stolostron/observatorium-operator v0.0.0-20240403132649-1f7129fc3a27 h1:21h43ofoLC2hMPKH0fY+oglbxSH4rphoWOjbjiNTUes= github.com/stolostron/observatorium-operator v0.0.0-20240403132649-1f7129fc3a27/go.mod h1:fFyJt9/dkQ1/4NxiW4CjH4lj7brxGlkA4SscxoLfzYY= github.com/stolostron/search-v2-operator v0.0.0-20220721051905-143d28ab4f10 h1:USGd9WwtGqAflJ0sY7k41hCO5L5BuYaPElmAsZm/q4M= +github.com/stolostron/search-v2-operator v0.0.0-20220721051905-143d28ab4f10/go.mod h1:o73lDVENck8rRBnjt+PmbDer0MyMq2LQ7g8FsqQbQuw= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1834,6 +1865,7 @@ github.com/thanos-io/thanos v0.30.0/go.mod h1:ve1mHR1dhCRqQlp0C5g+AFENoNu9GA4gtq github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1885,8 +1917,11 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhe github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= @@ -2819,6 +2854,7 @@ gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -2849,6 +2885,7 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= helm.sh/helm/v3 v3.11.1 h1:cmL9fFohOoNQf+wnp2Wa0OhNFH0KFnSzEkVxi3fcc3I= +helm.sh/helm/v3 v3.11.1/go.mod h1:z/Bu/BylToGno/6dtNGuSmjRqxKq5gaH+FU0BPO+AQ8= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2912,7 +2949,9 @@ open-cluster-management.io/addon-framework v0.8.1-0.20231128122622-3bfdbffb237c/ open-cluster-management.io/api v0.12.1-0.20231130134655-97a8a92a7f30 h1:qzkatL1pCsMvA2KkuJ0ywWUqJ0ZI13ouMRVuAPTrhWk= open-cluster-management.io/api v0.12.1-0.20231130134655-97a8a92a7f30/go.mod h1:fnoEBW9pbikOWOzF4zuT9DQAgWbY3PpPT/MSDZ/4bxw= open-cluster-management.io/multicloud-operators-channel v0.8.0 h1:2Cr7AiIWc4maVnhBI2MagNc1YF3UU/VHHCrlSpG3Yr8= +open-cluster-management.io/multicloud-operators-channel v0.8.0/go.mod h1:E2Y3/mDp+U6glXp+LMn27ViRJ4BsHwJ6QzDLeENEJmc= open-cluster-management.io/multicloud-operators-subscription v0.8.0 h1:0YRrUErVU6K6xwExGNaCMF//FOfJT6XyeHAvSbZNEiY= +open-cluster-management.io/multicloud-operators-subscription v0.8.0/go.mod h1:R83lMSoaMfs3T1Z3ApRjfioA9AgPMzam+CS6XbO5FjU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/loaders/dashboards/Containerfile.operator b/loaders/dashboards/Containerfile.operator index f8200e65d..8743a4c21 100644 --- a/loaders/dashboards/Containerfile.operator +++ b/loaders/dashboards/Containerfile.operator @@ -1,7 +1,7 @@ # Copyright Contributors to the Open Cluster Management project # Licensed under the Apache License 2.0 -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.20 AS builder +FROM 
brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.21 AS builder WORKDIR /workspace COPY go.sum go.mod ./loaders/dashboards ./ diff --git a/loaders/dashboards/Dockerfile b/loaders/dashboards/Dockerfile index cff372324..c73574d27 100644 --- a/loaders/dashboards/Dockerfile +++ b/loaders/dashboards/Dockerfile @@ -1,6 +1,6 @@ # Copyright Contributors to the Open Cluster Management project -FROM registry.ci.openshift.org/stolostron/builder:go1.20-linux AS builder +FROM registry.ci.openshift.org/stolostron/builder:go1.21-linux AS builder WORKDIR /workspace COPY go.sum go.mod ./loaders/dashboards ./ diff --git a/operators/endpointmetrics/Containerfile.operator b/operators/endpointmetrics/Containerfile.operator index c64a06604..ac3ce15d9 100644 --- a/operators/endpointmetrics/Containerfile.operator +++ b/operators/endpointmetrics/Containerfile.operator @@ -1,6 +1,6 @@ # Copyright (c) 2021 Red Hat, Inc. # Copyright Contributors to the Open Cluster Management project. -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.20 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.21 AS builder WORKDIR /workspace COPY go.sum go.mod ./ diff --git a/operators/endpointmetrics/Dockerfile b/operators/endpointmetrics/Dockerfile index 9bd4bf0a6..c211589ba 100644 --- a/operators/endpointmetrics/Dockerfile +++ b/operators/endpointmetrics/Dockerfile @@ -1,6 +1,6 @@ # Copyright (c) 2021 Red Hat, Inc. # Copyright Contributors to the Open Cluster Management project. -FROM registry.ci.openshift.org/stolostron/builder:go1.20-linux AS builder +FROM registry.ci.openshift.org/stolostron/builder:go1.21-linux AS builder WORKDIR /workspace COPY go.sum go.mod ./ diff --git a/operators/multiclusterobservability/Containerfile.operator b/operators/multiclusterobservability/Containerfile.operator index a6dafcc69..d99108129 100644 --- a/operators/multiclusterobservability/Containerfile.operator +++ b/operators/multiclusterobservability/Containerfile.operator @@ -1,7 +1,7 @@ # Copyright Contributors to the Open Cluster Management project # Licensed under the Apache License 2.0 -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.20 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.21 AS builder WORKDIR /workspace COPY go.sum go.mod ./ diff --git a/operators/multiclusterobservability/Dockerfile b/operators/multiclusterobservability/Dockerfile index 7887cda6e..296d2c18f 100644 --- a/operators/multiclusterobservability/Dockerfile +++ b/operators/multiclusterobservability/Dockerfile @@ -1,6 +1,6 @@ # Copyright Contributors to the Open Cluster Management project -FROM registry.ci.openshift.org/stolostron/builder:go1.20-linux AS builder +FROM registry.ci.openshift.org/stolostron/builder:go1.21-linux AS builder WORKDIR /workspace diff --git a/proxy/Containerfile.operator b/proxy/Containerfile.operator index fa8b9e858..ad8c85b5a 100644 --- a/proxy/Containerfile.operator +++ b/proxy/Containerfile.operator @@ -1,6 +1,6 @@ # Copyright Contributors to the Open Cluster Management project -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.20 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.21 AS builder WORKDIR /workspace COPY go.sum go.mod ./ diff --git a/proxy/Dockerfile b/proxy/Dockerfile index 61f2871a4..69663cc2e 100644 --- a/proxy/Dockerfile +++ b/proxy/Dockerfile @@ -1,6 +1,6 @@ # Copyright Contributors to the Open Cluster Management project -FROM 
registry.ci.openshift.org/stolostron/builder:go1.20-linux AS builder +FROM registry.ci.openshift.org/stolostron/builder:go1.21-linux AS builder WORKDIR /workspace COPY go.sum go.mod ./ diff --git a/tests/Containerfile.operator b/tests/Containerfile.operator index 451994ee0..efa7a4609 100644 --- a/tests/Containerfile.operator +++ b/tests/Containerfile.operator @@ -1,7 +1,7 @@ # Copyright Contributors to the Open Cluster Management project # Licensed under the Apache License 2.0 -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.20 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.21 AS builder WORKDIR /workspace # copy go tests into build image diff --git a/tests/Dockerfile b/tests/Dockerfile index 21845fc39..2c05917a7 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.ci.openshift.org/stolostron/builder:go1.20-linux AS builder +FROM registry.ci.openshift.org/stolostron/builder:go1.21-linux AS builder WORKDIR /workspace # copy go tests into build image diff --git a/tools/simulator/alert-forward/Containerfile.operator b/tools/simulator/alert-forward/Containerfile.operator index f9285820b..25f63257a 100644 --- a/tools/simulator/alert-forward/Containerfile.operator +++ b/tools/simulator/alert-forward/Containerfile.operator @@ -1,7 +1,7 @@ # Copyright Contributors to the Open Cluster Management project # Licensed under the Apache License 2.0 -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.20 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.21 AS builder WORKDIR /workspace COPY go.sum go.mod ./ @@ -22,4 +22,3 @@ COPY tools/simulator/alert-forward/alerts.json /tmp/ USER ${USER_UID} ENTRYPOINT ["/usr/local/bin/alert-forwarder"] - diff --git a/tools/simulator/alert-forward/Dockerfile b/tools/simulator/alert-forward/Dockerfile index 76001980f..fd1043012 100644 --- a/tools/simulator/alert-forward/Dockerfile +++ b/tools/simulator/alert-forward/Dockerfile @@ -1,6 +1,6 @@ # Copyright Contributors to the Open Cluster Management project -FROM registry.ci.openshift.org/stolostron/builder:go1.20-linux AS builder +FROM registry.ci.openshift.org/stolostron/builder:go1.21-linux AS builder WORKDIR /workspace COPY go.sum go.mod ./ @@ -21,4 +21,3 @@ COPY tools/simulator/alert-forward/alerts.json /tmp/ USER ${USER_UID} ENTRYPOINT ["/usr/local/bin/alert-forwarder"] - diff --git a/tools/simulator/metrics-collector/metrics-extractor/Containerfile.operator b/tools/simulator/metrics-collector/metrics-extractor/Containerfile.operator index 207bcdc5d..3cf797cea 100644 --- a/tools/simulator/metrics-collector/metrics-extractor/Containerfile.operator +++ b/tools/simulator/metrics-collector/metrics-extractor/Containerfile.operator @@ -1,7 +1,7 @@ # Copyright Contributors to the Open Cluster Management project # Licensed under the Apache License 2.0 -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.20 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.21 AS builder RUN GOBIN=/usr/local/bin go install github.com/brancz/gojsontoyaml@latest diff --git a/tools/simulator/metrics-collector/metrics-extractor/Dockerfile b/tools/simulator/metrics-collector/metrics-extractor/Dockerfile index 690cf4d3a..183fd5b02 100644 --- a/tools/simulator/metrics-collector/metrics-extractor/Dockerfile +++ b/tools/simulator/metrics-collector/metrics-extractor/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.ci.openshift.org/stolostron/builder:go1.20-linux AS 
builder +FROM registry.ci.openshift.org/stolostron/builder:go1.21-linux AS builder RUN GOBIN=/usr/local/bin go install github.com/brancz/gojsontoyaml@latest From 103e6d4cba7ce2c35bc530dc3b82480c1e65c9fb Mon Sep 17 00:00:00 2001 From: Philip Gough Date: Mon, 20 May 2024 11:13:02 +0100 Subject: [PATCH 20/33] Validate url for external remote write endpoint (#1432) Signed-off-by: Philip Gough --- .../observatorium.go | 11 +++- .../observatorium_test.go | 3 +- .../pkg/util/remotewriteendpoint.go | 20 ++++++++ .../pkg/util/remotewriteendpoint_test.go | 51 +++++++++++++++++++ 4 files changed, 81 insertions(+), 4 deletions(-) diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go index 9dfae3f85..98db22661 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go @@ -533,8 +533,8 @@ func newAPISpec(c client.Client, mco *mcov1beta2.MultiClusterObservability) (obs apiSpec.ImagePullPolicy = mcoconfig.GetImagePullPolicy(mco.Spec) apiSpec.ServiceMonitor = true if mco.Spec.StorageConfig.WriteStorage != nil { - eps := []mcoutil.RemoteWriteEndpointWithSecret{} - mountSecrets := []string{} + var eps []mcoutil.RemoteWriteEndpointWithSecret + var mountSecrets []string for _, storageConfig := range mco.Spec.StorageConfig.WriteStorage { storageSecret := &v1.Secret{} err := c.Get(context.TODO(), types.NamespacedName{Name: storageConfig.Name, @@ -560,6 +560,13 @@ func newAPISpec(c client.Client, mco *mcov1beta2.MultiClusterObservability) (obs log.Error(err, "Failed to unmarshal data in secret", "name", storageConfig.Name) return apiSpec, err } + + err = ep.Validate() + if err != nil { + log.Error(err, "Failed to validate data in secret", "name", storageConfig.Name) + return apiSpec, err + } + newEp := &mcoutil.RemoteWriteEndpointWithSecret{ Name: storageConfig.Name, URL: ep.URL, diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go index 993a9e006..7d07fa0c4 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go @@ -90,8 +90,7 @@ func TestNewDefaultObservatoriumSpec(t *testing.T) { }, Type: "Opaque", Data: map[string][]byte{ - "write_key": []byte(`url: http://remotewrite/endpoint -`), + "write_key": []byte(`url: http://remotewrite/endpoint`), }, } diff --git a/operators/multiclusterobservability/pkg/util/remotewriteendpoint.go b/operators/multiclusterobservability/pkg/util/remotewriteendpoint.go index 221391705..6280c09ac 100644 --- a/operators/multiclusterobservability/pkg/util/remotewriteendpoint.go +++ b/operators/multiclusterobservability/pkg/util/remotewriteendpoint.go @@ -5,6 +5,8 @@ package util import ( + "fmt" + "net/url" "path" "github.com/prometheus/common/config" @@ -95,6 +97,24 @@ type RemoteWriteEndpointWithSecret struct { HttpClientConfig *HTTPClientConfigWithSecret `yaml:"http_client_config,omitempty" json:"http_client_config,omitempty"` } +// Validate validates the remote write endpoint +func (res *RemoteWriteEndpointWithSecret) Validate() error { + if res.URL.String() == "" { + return fmt.Errorf("url is required 
for remote write endpoint %s", res.Name) + } + + u, err := url.ParseRequestURI(res.URL.String()) + if err != nil { + return fmt.Errorf("url %s is invalid for remote write endpoint %s: %s", res.URL.String(), res.Name, err) + } + + if u.Scheme == "" || u.Scheme != "http" && u.Scheme != "https" { + return fmt.Errorf("url %s is invalid for remote write endpoint %s: scheme must be http or https", res.URL.String(), res.Name) + } + + return nil +} + func getMountPath(secretName, key string) string { return path.Join(MountPath, secretName, key) } diff --git a/operators/multiclusterobservability/pkg/util/remotewriteendpoint_test.go b/operators/multiclusterobservability/pkg/util/remotewriteendpoint_test.go index dc28d7c76..b6e7d9707 100644 --- a/operators/multiclusterobservability/pkg/util/remotewriteendpoint_test.go +++ b/operators/multiclusterobservability/pkg/util/remotewriteendpoint_test.go @@ -5,8 +5,11 @@ package util import ( + "net/url" "path" "testing" + + "github.com/prometheus/common/config" ) const ( @@ -84,3 +87,51 @@ func TestTransform(t *testing.T) { t.Fatalf("Wrong number of mount secrets: expect 5, get %d", len(names)) } } + +func TestValidateRemoteWriteEndpointWithSecret(t *testing.T) { + testCases := []struct { + name string + endpoint *RemoteWriteEndpointWithSecret + wantErr bool + }{ + { + name: "test missing url", + endpoint: &RemoteWriteEndpointWithSecret{Name: "valid-name", URL: mustParseURL(t, "")}, + wantErr: true, + }, + { + name: "test invalid url no scheme", + endpoint: &RemoteWriteEndpointWithSecret{Name: "valid-name", URL: mustParseURL(t, "invalid-url")}, + wantErr: true, + }, + { + name: "test valid url invalid scheme", + endpoint: &RemoteWriteEndpointWithSecret{Name: "valid-name", URL: mustParseURL(t, "httttp://some-valid-host.com:8080/prometheus/api/v1/write")}, + wantErr: true, + }, + { + name: "test happy path", + endpoint: &RemoteWriteEndpointWithSecret{Name: "valid-name", URL: mustParseURL(t, "https://example.com")}, + wantErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.endpoint.Validate() + if (err != nil) != tc.wantErr { + t.Errorf("Validate() error = %v, wantErr %v", err, tc.wantErr) + return + } + }) + } +} + +func mustParseURL(t *testing.T, s string) config.URL { + u, err := url.Parse(s) + if err != nil { + t.Fatalf(err.Error()) + } + + return config.URL{URL: u} +} From 071c7f9b757a31545d420af47f844477e546c551 Mon Sep 17 00:00:00 2001 From: Subbarao Meduri Date: Tue, 21 May 2024 08:50:06 -0400 Subject: [PATCH 21/33] precreate and set permissions for /workspace/.kube directory (#1443) Signed-off-by: Subbarao Meduri --- tests/Containerfile.operator | 4 ++-- tests/Dockerfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/Containerfile.operator b/tests/Containerfile.operator index efa7a4609..81a3e3bf3 100644 --- a/tests/Containerfile.operator +++ b/tests/Containerfile.operator @@ -15,8 +15,8 @@ RUN go install github.com/onsi/ginkgo/ginkgo@v1.14.2 && go mod vendor && ginkgo FROM registry.access.redhat.com/ubi8/ubi-minimal:latest # pre-create directories and set permissions -RUN mkdir -p /resources /results && \ - chown -R 1001:1001 /resources /results +RUN mkdir -p /resources /results /workspace/.kube && \ + chown -R 1001:1001 /resources /results /workspace/.kube # run as non-root USER 1001:1001 diff --git a/tests/Dockerfile b/tests/Dockerfile index 2c05917a7..20fe5f54f 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -12,8 +12,8 @@ RUN go install 
github.com/onsi/ginkgo/ginkgo@v1.14.2 && go mod vendor && ginkgo FROM registry.access.redhat.com/ubi8/ubi-minimal:latest # pre-create directories and set permissions -RUN mkdir -p /resources /results && \ - chown -R 1001:1001 /resources /results +RUN mkdir -p /resources /results /workspace/.kube && \ + chown -R 1001:1001 /resources /results /workspace/.kube # run as non-root USER 1001:1001 From 0402d976cacefa99778186a5baa74b4e583d9a42 Mon Sep 17 00:00:00 2001 From: Douglas Camata <159076+douglascamata@users.noreply.github.com> Date: Tue, 21 May 2024 17:59:52 +0200 Subject: [PATCH 22/33] [ACM-11722] Add support for alertmanager path prefix and validate URL (#1442) * Add support for alertmanager path prefix and validate URL Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Force a protocol on the AM url Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Reconcile placement controller when custom AM url changes in the mco Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Fix mco predicate issue with nil advanced config Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Only accept https for custom alertmanager url Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --------- Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --- .../ocp_monitoring_config.go | 14 +++++++++++--- .../shared/multiclusterobservability_shared.go | 4 ++-- ...nagement.io_multiclusterobservabilities.yaml | 4 ++-- ...nagement.io_multiclusterobservabilities.yaml | 4 ++-- .../placementrule/hub_info_secret.go | 5 +++++ .../placementrule/hub_info_secret_test.go | 4 ++-- .../controllers/placementrule/mco_predicate.go | 17 ++++++++++++++--- 7 files changed, 38 insertions(+), 14 deletions(-) diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go index 48faa13fa..88b058ca0 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go @@ -8,8 +8,8 @@ import ( "context" "encoding/json" "fmt" + "net/url" "reflect" - "strings" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -254,7 +254,7 @@ func getAmAccessorToken(ctx context.Context, client client.Client) (string, erro } func newAdditionalAlertmanagerConfig(hubInfo *operatorconfig.HubInfo) cmomanifests.AdditionalAlertmanagerConfig { - return cmomanifests.AdditionalAlertmanagerConfig{ + config := cmomanifests.AdditionalAlertmanagerConfig{ Scheme: "https", PathPrefix: "/", APIVersion: "v2", @@ -273,8 +273,16 @@ func newAdditionalAlertmanagerConfig(hubInfo *operatorconfig.HubInfo) cmomanifes }, Key: hubAmAccessorSecretKey, }, - StaticConfigs: []string{strings.TrimPrefix(hubInfo.AlertmanagerEndpoint, "https://")}, + StaticConfigs: []string{}, } + amURL, err := url.Parse(hubInfo.AlertmanagerEndpoint) + if err != nil { + return config + } + + config.PathPrefix = amURL.Path + config.StaticConfigs = append(config.StaticConfigs, amURL.Host) + return config } // createOrUpdateClusterMonitoringConfig creates or updates the configmap diff --git a/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go b/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go index 7a79487c2..9bbf588cd 100644 --- 
a/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go +++ b/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go @@ -15,8 +15,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// URL is kubebuilder type that validates the containing string is an URL. -// +kubebuilder:validation:Pattern=`^https?:\/\/` +// URL is kubebuilder type that validates the containing string is an HTTPS URL. +// +kubebuilder:validation:Pattern=`^https:\/\/` // +kubebuilder:validation:MaxLength=2083 type URL string diff --git a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml index 473330a3e..98d5dc6d7 100644 --- a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml +++ b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -1151,12 +1151,12 @@ spec: customAlertmanagerHubURL: description: CustomAlertmanagerHubURL overrides the alertmanager URL to send alerts from the spoke to the hub server. For the alertmanager that runs in the hub this setting has no effect. maxLength: 2083 - pattern: ^https?:\/\/ + pattern: ^https:\/\/ type: string customObservabilityHubURL: description: CustomObservabilityHubURL overrides the endpoint used by the metrics-collector to send metrics to the hub server. For the metrics-collector that runs in the hub this setting has no effect. maxLength: 2083 - pattern: ^https?:\/\/ + pattern: ^https:\/\/ type: string grafana: description: The spec of grafana diff --git a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml index 7534e7de6..fe9d8b733 100644 --- a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml +++ b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -1766,7 +1766,7 @@ spec: URL to send alerts from the spoke to the hub server. For the alertmanager that runs in the hub this setting has no effect. maxLength: 2083 - pattern: ^https?:\/\/ + pattern: ^https:\/\/ type: string customObservabilityHubURL: description: CustomObservabilityHubURL overrides the endpoint @@ -1774,7 +1774,7 @@ spec: For the metrics-collector that runs in the hub this setting has no effect. 
maxLength: 2083 - pattern: ^https?:\/\/ + pattern: ^https:\/\/ type: string grafana: description: The spec of grafana diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go index 3ece1f486..29f862681 100644 --- a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go +++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go @@ -7,6 +7,7 @@ package placementrule import ( "context" "net/url" + "strings" "gopkg.in/yaml.v2" corev1 "k8s.io/api/core/v1" @@ -37,6 +38,10 @@ func generateHubInfoSecret(client client.Client, obsNamespace string, // if alerting is disabled, do not set alertmanagerEndpoint if !config.IsAlertingDisabled() { alertmanagerEndpoint, err = config.GetAlertmanagerEndpoint(context.TODO(), client, obsNamespace) if err != nil { log.Error(err, "Failed to get alertmanager endpoint") return nil, err } + + if !strings.HasPrefix(alertmanagerEndpoint, "https://") { + alertmanagerEndpoint = "https://" + alertmanagerEndpoint + } diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go index c9c0d0fc1..15eb94158 100644 --- a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go +++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go @@ -135,7 +135,7 @@ func TestNewSecret(t *testing.T) { if err != nil { t.Fatalf("Failed to unmarshal data in hub info secret (%v)", err) } - if !strings.HasPrefix(hub.ObservatoriumAPIEndpoint, "https://test-host") || hub.AlertmanagerEndpoint != routeHost || hub.AlertmanagerRouterCA != routerCA { + if !strings.HasPrefix(hub.ObservatoriumAPIEndpoint, "https://test-host") || hub.AlertmanagerEndpoint != "https://"+routeHost || hub.AlertmanagerRouterCA != routerCA { t.Fatalf("Wrong content in hub info secret: \ngot: "+hub.ObservatoriumAPIEndpoint+" "+hub.AlertmanagerEndpoint+" "+hub.AlertmanagerRouterCA, clusterName+" "+"https://test-host"+" "+"test-host"+" "+routerCA) } } @@ -155,7 +155,7 @@ func TestNewBYOSecret(t *testing.T) { if err != nil { t.Fatalf("Failed to unmarshal data in hub info secret (%v)", err) } - if !strings.HasPrefix(hub.ObservatoriumAPIEndpoint, "https://test-host") || hub.AlertmanagerEndpoint != routeHost || hub.AlertmanagerRouterCA != routerBYOCA { + if !strings.HasPrefix(hub.ObservatoriumAPIEndpoint, "https://test-host") || hub.AlertmanagerEndpoint != "https://"+routeHost || hub.AlertmanagerRouterCA != routerBYOCA { t.Fatalf("Wrong content in hub info secret: \ngot: "+hub.ObservatoriumAPIEndpoint+" "+hub.AlertmanagerEndpoint+" "+hub.AlertmanagerRouterCA, clusterName+" "+"https://test-host"+" "+"test-host"+" "+routerBYOCA) } } diff --git a/operators/multiclusterobservability/controllers/placementrule/mco_predicate.go b/operators/multiclusterobservability/controllers/placementrule/mco_predicate.go index 6b52330bc..001774d10 100644 --- a/operators/multiclusterobservability/controllers/placementrule/mco_predicate.go +++ b/operators/multiclusterobservability/controllers/placementrule/mco_predicate.go @@ -36,18 +36,29 @@ func getMCOPred(c client.Client, ingressCtlCrdExists bool) predicate.Funcs { }, UpdateFunc: func(e event.UpdateEvent) bool { retval := false - mco := e.ObjectNew.(*mcov1beta2.MultiClusterObservability) + updateHubInfo := false + newMCO := 
e.ObjectNew.(*mcov1beta2.MultiClusterObservability) + oldMCO := e.ObjectOld.(*mcov1beta2.MultiClusterObservability) oldAlertingStatus := config.IsAlertingDisabled() - newAlertingStatus := config.IsAlertingDisabledInSpec(mco) + newAlertingStatus := config.IsAlertingDisabledInSpec(newMCO) + + if !reflect.DeepEqual(newMCO.Spec.AdvancedConfig, oldMCO.Spec.AdvancedConfig) { + updateHubInfo = true + retval = true + } // if value changed, then mustReconcile is true if oldAlertingStatus != newAlertingStatus { config.SetAlertingDisabled(newAlertingStatus) + retval = true + updateHubInfo = true + } + + if updateHubInfo { var err error hubInfoSecret, err = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) if err != nil { log.Error(err, "unable to get HubInfoSecret", "controller", "PlacementRule") } - retval = true } // only reconcile when ObservabilityAddonSpec updated From 2d621705dfc60654194e767585dcfa6d37622863 Mon Sep 17 00:00:00 2001 From: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> Date: Wed, 22 May 2024 11:57:13 +0200 Subject: [PATCH 23/33] Improve e2e logs (#1435) * change test ContainManagedClusterMetric Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * improve e2e test logging Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * add kube_debug file Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * remove unused functions Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * reduce log lines Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * log statefulsets and daemonsets Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * add copyright Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * fix pods list, add cm and secrets list Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --------- Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --- .../observability-e2e-test_suite_test.go | 6 +- tests/pkg/tests/observability_addon_test.go | 18 +- tests/pkg/tests/observability_alert_test.go | 34 +- .../pkg/tests/observability_certrenew_test.go | 23 +- tests/pkg/tests/observability_config_test.go | 4 +- .../pkg/tests/observability_dashboard_test.go | 4 +- .../observability_endpoint_preserve_test.go | 4 +- tests/pkg/tests/observability_export_test.go | 50 +- .../tests/observability_grafana_dev_test.go | 4 +- tests/pkg/tests/observability_grafana_test.go | 11 +- tests/pkg/tests/observability_install_test.go | 4 +- .../tests/observability_manifestwork_test.go | 21 +- tests/pkg/tests/observability_metrics_test.go | 66 +-- ...servability_observatorium_preserve_test.go | 34 +- .../pkg/tests/observability_reconcile_test.go | 4 +- .../pkg/tests/observability_retention_test.go | 4 +- tests/pkg/tests/observability_route_test.go | 4 +- .../pkg/tests/observability_uninstall_test.go | 2 +- tests/pkg/utils/cluster_deploy.go | 44 -- tests/pkg/utils/install_config.go | 56 --- tests/pkg/utils/kube_debug.go | 434 ++++++++++++++++++ tests/pkg/utils/mco_configmaps.go | 3 +- tests/pkg/utils/mco_deploy.go | 271 +---------- tests/pkg/utils/mco_metric.go | 80 ++-- tests/pkg/utils/mco_pods.go | 10 - tests/pkg/utils/utils.go | 180 +------- 26 files changed, 659 insertions(+), 716 deletions(-) delete mode 100644 tests/pkg/utils/cluster_deploy.go delete mode 100644 tests/pkg/utils/install_config.go create mode 100644 
tests/pkg/utils/kube_debug.go diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go index ef94ef363..4575459e7 100644 --- a/tests/pkg/tests/observability-e2e-test_suite_test.go +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -13,6 +13,7 @@ import ( "time" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" "gopkg.in/yaml.v2" @@ -129,7 +130,10 @@ func init() { func TestObservabilityE2E(t *testing.T) { RegisterFailHandler(Fail) + config.DefaultReporterConfig.NoColor = true + config.DefaultReporterConfig.Succinct = true junitReporter := reporters.NewJUnitReporter(reportFile) + junitReporter.ReporterConfig.NoColor = true RunSpecsWithDefaultAndCustomReporters(t, "Observability E2E Suite", []Reporter{junitReporter}) } @@ -141,8 +145,6 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { if !testFailed { uninstallMCO() - } else { - utils.PrintAllMCOPodsStatus(testOptions) } }) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index e5edff902..3318c6541 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -96,17 +96,18 @@ var _ = Describe("Observability:", func() { It("[Stable] Waiting for check no metric data in grafana console", func() { Eventually(func() error { for _, cluster := range clusters { - err, hasMetric := utils.ContainManagedClusterMetric( + res, err := utils.QueryGrafana( testOptions, `timestamp(node_memory_MemAvailable_bytes{cluster="`+cluster+`"}) - timestamp(node_memory_MemAvailable_bytes{cluster="`+cluster+`"} offset 1m) > 59`, - []string{`"__name__":"node_memory_MemAvailable_bytes"`}, ) - if err != nil && !hasMetric && - strings.Contains(err.Error(), "failed to find metric name from response") { - return nil + if err != nil { + return err + } + if len(res.Data.Result) != 0 { + return fmt.Errorf("Grafana console still has metric data: %v", res.Data.Result) + } } - return fmt.Errorf("Check no metric data in grafana console error: %w", err) + return nil }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*5).Should(Succeed()) }) @@ -210,10 +211,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) - utils.PrintManagedClusterOBAObject(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed })
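For reference while reading these hunks: the tests now consume utils.QueryGrafana through res.Data.Result, and the export test further down calls res.ContainsLabelsSet; the helper itself lives in tests/pkg/utils/mco_metric.go, whose diff is not included in this excerpt. A minimal sketch of the response shape these call sites appear to assume — the type and field names below are inferred from usage, not copied from the commit:

package utils

// MetricsQueryResult models the subset of a Prometheus-style instant
// query response that the tests rely on (assumed shape).
type MetricsQueryResult struct {
	Status string `json:"status"`
	Data   struct {
		ResultType string         `json:"resultType"`
		Result     []metricSeries `json:"result"`
	} `json:"data"`
}

// metricSeries is one series in the query result.
type metricSeries struct {
	Metric map[string]string `json:"metric"`
	Value  []interface{}     `json:"value"`
}

// ContainsLabelsSet reports whether at least one returned series
// carries every label key/value pair in labels.
func (r MetricsQueryResult) ContainsLabelsSet(labels map[string]string) bool {
	for _, series := range r.Data.Result {
		matched := true
		for k, v := range labels {
			if series.Metric[k] != v {
				matched = false
				break
			}
		}
		if matched {
			return true
		}
	}
	return false
}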
diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 7a2766106..95a3ab6ed 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -186,9 +186,15 @@ var _ = Describe("Observability:", func() { By("Checking alert generated") Eventually(func() error { - err, _ := utils.ContainManagedClusterMetric(testOptions, `ALERTS{`+labelName+`="`+labelValue+`"}`, - []string{`"__name__":"ALERTS"`, `"` + labelName + `":"` + labelValue + `"`}) - return err + query := fmt.Sprintf(`ALERTS{%s="%s"}`, labelName, labelValue) + res, err := utils.QueryGrafana(testOptions, query) + if err != nil { + return err + } + if len(res.Data.Result) == 0 { + return fmt.Errorf("no data found for %s", query) + } + return nil }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) @@ -212,6 +218,7 @@ var _ = Describe("Observability:", func() { It("[P2][Sev2][observability][Stable] Should have custom alert updated (alert/g0)", func() { By("Updating custom alert rules") + // Replace preceding custom alert with new one that cannot fire yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/alerts/custom_rules_invalid"}, ) @@ -231,12 +238,21 @@ var _ = Describe("Observability:", func() { By("Checking alert generated") Eventually( func() error { - err, _ := utils.ContainManagedClusterMetric(testOptions, `ALERTS{`+labelName+`="`+labelValue+`"}`, - []string{`"__name__":"ALERTS"`, `"` + labelName + `":"` + labelValue + `"`}) - return err + query := fmt.Sprintf(`ALERTS{%s="%s"}`, labelName, labelValue) + res, err := utils.QueryGrafana(testOptions, query) + if err != nil { + return err + } + + if len(res.Data.Result) != 0 { + // No alert should be generated + return fmt.Errorf("alert should not be generated, got %v", res) + } + + return nil }, EventuallyTimeoutMinute*5, - EventuallyIntervalSecond*5).Should(MatchError("failed to find metric name from response")) + EventuallyIntervalSecond*5).Should(Succeed()) }) It("[P2][Sev2][observability][Stable] delete the customized rules (alert/g0)", func() { @@ -389,9 +405,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index c5e765759..5a5d45dd2 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -140,18 +140,19 @@ func runCertRenewTests(clusterConfig utils.Cluster) { namespace, "component=metrics-collector", ) - if err == nil { - for _, pod := range podList.Items { - if pod.Name != collectorPodName { - if pod.Status.Phase != "Running" { - klog.V(1).Infof("<%s> not in Running status yet", pod.Name) - return false - } - return true + if err != nil { + klog.V(1).Infof("Failed to get pod list: %v", err) + return false + } + for _, pod := range podList.Items { + if pod.Name != collectorPodName { + if pod.Status.Phase != "Running" { + klog.V(1).Infof("<%s> not in Running status yet", pod.Name) + return false } + return true } - } + // debug code to check label "cert/time-restarted" deployment, err := utils.GetDeployment( clusterConfig, isHub, "metrics-collector-deployment", namespace, @@ -172,9 +173,7 @@ func runCertRenewTests(clusterConfig utils.Cluster) { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed namespace = MCO_ADDON_NAMESPACE diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 00b63d9ec..10336a7ed 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -279,9 +279,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + 
utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 4518f71c7..33e8d801a 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -73,9 +73,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index fc461baee..c2a464077 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -202,9 +202,7 @@ func runMetricsCollectorTests(clusterConfig utils.Cluster) { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } namespace = MCO_ADDON_NAMESPACE testFailed = testFailed || CurrentGinkgoTestDescription().Failed diff --git a/tests/pkg/tests/observability_export_test.go b/tests/pkg/tests/observability_export_test.go index 86364cc72..48b077de4 100644 --- a/tests/pkg/tests/observability_export_test.go +++ b/tests/pkg/tests/observability_export_test.go @@ -5,7 +5,6 @@ package tests import ( - "errors" "fmt" "os" @@ -65,47 +64,42 @@ var _ = Describe("Observability:", func() { By("Waiting for metrics acm_remote_write_requests_total on grafana console") Eventually(func() error { query := fmt.Sprintf("acm_remote_write_requests_total{cluster=\"%s\"} offset 1m", hubClusterName) - err, _ := utils.ContainManagedClusterMetric( + res, err := utils.QueryGrafana( testOptions, query, - []string{`"__name__":"acm_remote_write_requests_total"`}, ) if err != nil { return err } - err, _ = utils.ContainManagedClusterMetric( - testOptions, - query, - []string{`"__name__":"acm_remote_write_requests_total"`, - `"code":"200`, `"name":"thanos-receiver"`}, - ) - if err != nil { - return errors.New("metrics not forwarded to thanos-receiver") + if len(res.Data.Result) == 0 { + return fmt.Errorf("metric %s not found in response", query) } - err, _ = utils.ContainManagedClusterMetric( - testOptions, - query, - []string{`"__name__":"acm_remote_write_requests_total"`, - `"code":"204`, `"name":"victoriametrics"`}, - ) - if err != nil { - return errors.New("metrics not forwarded to victoriametrics") + + // Check if the metric is forwarded to thanos-receiver + labelSet := map[string]string{"code": "200", "name": "thanos-receiver"} + if !res.ContainsLabelsSet(labelSet) { + return fmt.Errorf("labels %v not found in response: %v", labelSet, res) + } + + // Check if the metric is forwarded to victoriametrics + labelSet = map[string]string{"code": "204", "name": "victoriametrics"} + if !res.ContainsLabelsSet(labelSet) { + return fmt.Errorf("labels %v not found in response: %v", labelSet, res) } + return nil - }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) JustAfterEach(func() { - 
Expect(utils.CleanExportResources(testOptions)).NotTo(HaveOccurred()) - Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) - }) - - AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) + + AfterEach(func() { + Expect(utils.CleanExportResources(testOptions)).NotTo(HaveOccurred()) + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) }) diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index 09ed3da07..13fe02af2 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -35,9 +35,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index 72fba5f1b..ae2186c31 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -22,14 +22,17 @@ var _ = Describe("Observability:", func() { } for _, cluster := range clusters { query := fmt.Sprintf("node_memory_MemAvailable_bytes{cluster=\"%s\"}", cluster) - err, _ = utils.ContainManagedClusterMetric( + res, err := utils.QueryGrafana( testOptions, query, - []string{`"__name__":"node_memory_MemAvailable_bytes"`}, ) if err != nil { return err } + + if len(res.Data.Result) == 0 { + return fmt.Errorf("no data found for %s", query) + } } return nil }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) @@ -41,9 +44,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_install_test.go b/tests/pkg/tests/observability_install_test.go index 90340c11b..485196ba3 100644 --- a/tests/pkg/tests/observability_install_test.go +++ b/tests/pkg/tests/observability_install_test.go @@ -190,7 +190,7 @@ func installMCO() { mcoLogs, err := utils.GetPodLogs(testOptions, true, mcoNs, mcoPod, "multicluster-observability-operator", false, 1000) Expect(err).NotTo(HaveOccurred()) fmt.Fprintf(GinkgoWriter, "[DEBUG] MCO is installed failed, checking MCO operator logs:\n%s\n", mcoLogs) - utils.PrintAllMCOPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) }() By("Waiting for MCO ready status") @@ -214,7 +214,7 @@ func installMCO() { } fmt.Fprintf(GinkgoWriter, "[DEBUG] Addon failed, checking pods:\n") - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) }() By("Check endpoint-operator and metrics-collector pods are ready") Eventually(func() error { diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index 418fbceb1..dbab33fd9 100644 
--- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -7,6 +7,7 @@ package tests import ( "context" "errors" + "fmt" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -97,12 +98,20 @@ var _ = Describe("Observability:", func() { It("[Stable] Checking metric to ensure that no data is lost in 1 minute", func() { Eventually(func() error { - err, _ = utils.ContainManagedClusterMetric( + query := fmt.Sprintf(`timestamp(node_memory_MemAvailable_bytes{cluster="%s"}) - timestamp(node_memory_MemAvailable_bytes{cluster="%s"} offset 1m) > 59`, clusterName, clusterName) + res, err := utils.QueryGrafana( testOptions, - `timestamp(node_memory_MemAvailable_bytes{cluster="`+clusterName+`}) - timestamp(node_memory_MemAvailable_bytes{cluster=`+clusterName+`"} offset 1m) > 59`, - []string{`"__name__":"node_memory_MemAvailable_bytes"`}, + query, ) - return err + if err != nil { + return err + } + + if len(res.Data.Result) == 0 { + return fmt.Errorf("no data found for %s", query) + } + + return nil }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*3).Should(Succeed()) }) } @@ -114,9 +123,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 22f9586ea..c1de483c5 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -11,7 +11,6 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog" "github.com/stolostron/multicluster-observability-operator/tests/pkg/kustomize" "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" @@ -22,9 +21,8 @@ const ( ) var ( - clusters []string - clusterError error - metricslistError error + clusters []string + clusterError error ) var _ = Describe("Observability:", func() { @@ -61,14 +59,17 @@ var _ = Describe("Observability:", func() { Eventually(func() error { for _, cluster := range clusters { query := fmt.Sprintf("node_memory_Active_bytes{cluster=\"%s\"} offset 1m", cluster) - err, _ := utils.ContainManagedClusterMetric( + res, err := utils.QueryGrafana( testOptions, query, - []string{`"__name__":"node_memory_Active_bytes"`}, ) if err != nil { return err } + + if len(res.Data.Result) == 0 { + return fmt.Errorf("no data found for %s", query) + } } return nil }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) @@ -83,13 +84,17 @@ var _ = Describe("Observability:", func() { cluster, cluster, ) - metricslistError, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{}) - if metricslistError == nil { - return nil + res, err := utils.QueryGrafana(testOptions, query) + if err != nil { + return err + } + // there should be no data for the deleted metric + if len(res.Data.Result) != 0 { + return fmt.Errorf("metric %s found in response: %v", query, res) } } - return metricslistError - }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("failed to find metric name from response")) + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) It("[P2][Sev2][observability][Integration] Should have no metrics which have been marked for deletion in matches section (metrics/g0)", func() { @@ -101,13 +106,16 @@ var _ = Describe("Observability:", func() { cluster, cluster, ) - metricslistError, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{}) - if metricslistError == nil { - return nil + res, err := utils.QueryGrafana(testOptions, query) + if err != nil { + return err + } + if len(res.Data.Result) != 0 { + return fmt.Errorf("metric %s found in response: %v", query, res) } } - return metricslistError - }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("failed to find metric name from response")) + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) It("[P2][Sev2][observability][Integration] Should have no metrics after custom metrics allowlist deleted (metrics/g0)", func() { @@ -127,13 +135,16 @@ var _ = Describe("Observability:", func() { cluster, cluster, ) - metricslistError, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{}) - if metricslistError == nil { - return nil + res, err := utils.QueryGrafana(testOptions, query) + if err != nil { + return err + } + if len(res.Data.Result) != 0 { + return fmt.Errorf("metric %s found in response: %v", query, res) } } - return metricslistError - }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("failed to find metric name from response")) + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) It("[P2][Sev2][observability][Integration] Should have metrics which used grafana dashboard (ssli/g1)", func() { @@ -157,11 +168,14 @@ var _ = Describe("Observability:", func() { _, ok := ignoreMetricMap[name] if !ok { Eventually(func() error { - err, _ 
:= utils.ContainManagedClusterMetric(testOptions, name, []string{name}) + res, err := utils.QueryGrafana(testOptions, name) if err != nil { - klog.V(1).Infof("failed to get metrics %s", name) + return fmt.Errorf("failed to get metrics %s: %v", name, err) } - return err + if len(res.Data.Result) == 0 { + return fmt.Errorf("no data found for %s", name) + } + return nil }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*3).Should(Succeed()) } } @@ -173,9 +187,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index bae4c7493..b099e4ecd 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -6,6 +6,8 @@ package tests import ( "context" + "errors" + "fmt" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -65,21 +67,25 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*1).Should(BeTrue()) // ensure the thanos compact is restarted - Eventually(func() bool { + Eventually(func() error { sts, err := utils.GetStatefulSetWithLabel(testOptions, true, THANOS_COMPACT_LABEL, MCO_NAMESPACE) - if err == nil { - if (*sts).Items[0].ResourceVersion != oldCompactResourceVersion { - argList := (*sts).Items[0].Spec.Template.Spec.Containers[0].Args - for _, arg := range argList { - if arg != "--retention.resolution-raw="+updateRetention { - return true - } - } - return false + if err != nil { + return err + } + if sts.Items[0].ResourceVersion == oldCompactResourceVersion { + return errors.New("The thanos compact pod is not restarted. ResourceVersion has not changed.") + } + + argList := sts.Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + // fail if the manually updated retention flag is still present, i.e. it was not reverted + if arg == "--retention.resolution-raw="+updateRetention { + return fmt.Errorf("The manually updated retention flag is still set on the thanos compact pod; the operator has not reverted it yet. 
Args: %v", argList) } } - return false - }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(BeTrue()) + + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) By("Wait for thanos compact pods are ready") sts, err := utils.GetStatefulSetWithLabel(testOptions, true, THANOS_COMPACT_LABEL, MCO_NAMESPACE) @@ -102,9 +108,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index 8ed55bac4..c37470a64 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -201,9 +201,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_retention_test.go b/tests/pkg/tests/observability_retention_test.go index 66b207cf0..acd9ea220 100644 --- a/tests/pkg/tests/observability_retention_test.go +++ b/tests/pkg/tests/observability_retention_test.go @@ -177,9 +177,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 37c5099f5..9b01930e9 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -195,9 +195,7 @@ var _ = Describe("Observability:", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - utils.PrintMCOObject(testOptions) - utils.PrintAllMCOPodsStatus(testOptions) - utils.PrintAllOBAPodsStatus(testOptions) + utils.LogFailingTestStandardDebugInfo(testOptions) } testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) diff --git a/tests/pkg/tests/observability_uninstall_test.go b/tests/pkg/tests/observability_uninstall_test.go index 5dbb86f8f..0dc0f89b4 100644 --- a/tests/pkg/tests/observability_uninstall_test.go +++ b/tests/pkg/tests/observability_uninstall_test.go @@ -56,7 +56,7 @@ func uninstallMCO() { Namespace(MCO_ADDON_NAMESPACE). Get(context.TODO(), name, metav1.GetOptions{}) if instance != nil { - utils.PrintManagedClusterOBAObject(testOptions) + utils.PrintObject(context.Background(), clientDynamic, utils.NewMCOAddonGVR(), MCO_ADDON_NAMESPACE, "observability-addon") return errors.New("Failed to delete MCO addon instance") } return nil diff --git a/tests/pkg/utils/cluster_deploy.go b/tests/pkg/utils/cluster_deploy.go deleted file mode 100644 index c16ecf2a3..000000000 --- a/tests/pkg/utils/cluster_deploy.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) Red Hat, Inc. 
-// Copyright Contributors to the Open Cluster Management project -// Licensed under the Apache License 2.0 - -package utils - -// ClusterDeploy defines the data passed to Hive. -type ClusterDeploy struct { - Kind string `yaml:"kind"` - APIVersion string `yaml:"apiVersion"` - Items []Items `yaml:"items"` -} - -// Items defines the list of items in the cluster deploy yaml. -type Items struct { - Kind string `yaml:"kind"` - Metadata Metadata `yaml:"metadata"` - StringData StringData `yaml:"stringData,omitempty"` - Spec Spec `yaml:"spec,omitempty"` -} - -// Metadata defines the name. -type Metadata struct { - Name string `yaml:"name,omitempty"` -} - -// StringData defiines the ssh values. -type StringData struct { - Dockerconfigjson string `yaml:".dockerconfigjson,omitempty"` - SSHPrivateKey string `yaml:"ssh-privatekey,omitempty"` -} - -// Spec defines the kube specifications. -type Spec struct { - BaseDomain string `yaml:"baseDomain,omitempty"` - ClusterName string `yaml:"clusterName,omitempty"` - Provisioning Provisioning `yaml:"provisioning,omitempty"` -} - -// Provisioning defines the data related to cluster creation. -type Provisioning struct { - ReleaseImage string `yaml:"releaseImage,omitempty"` - SSHKnownHosts []string `yaml:"sshKnownHosts,omitempty"` -} diff --git a/tests/pkg/utils/install_config.go b/tests/pkg/utils/install_config.go deleted file mode 100644 index 298fefde5..000000000 --- a/tests/pkg/utils/install_config.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) Red Hat, Inc. -// Copyright Contributors to the Open Cluster Management project -// Licensed under the Apache License 2.0 - -package utils - -// InstallConfig definition for install config structure from install-config.yaml. -type InstallConfig struct { - BaseDomain string `yaml:"baseDomain,omitempty"` - Networking Networking `yaml:"networking,omitempty"` - Metadata Metadata `yaml:"metadata"` - Platform Platform `yaml:"platform,omitempty"` - PullSecret string `yaml:"pullSecret,omitempty"` - SSHKey string `yaml:"sshKey,omitempty"` -} - -// Networking definition. -type Networking struct { - NetworkType string `yaml:"networkType"` - MachineCIDR string `yaml:"machineCIDR"` -} - -// Platform definition. -type Platform struct { - Baremetal Baremetal `yaml:"baremetal,omitempty"` -} - -// Baremetal specs for target baremetal provisioning. -type Baremetal struct { - ExternalBridge string `yaml:"externalBridge,omitempty"` - ProvisioningBridge string `yaml:"provisioningBridge,omitempty"` - LibvirtURI string `yaml:"libvirtURI,omitempty"` - ProvisioningNetworkInterface string `yaml:"provisioningNetworkInterface,omitempty"` - ProvisioningNetworkCIDR string `yaml:"provisioningNetworkCIDR,omitempty"` - APIVIP string `yaml:"apiVIP,omitempty"` - DNSVIP string `yaml:"dnsVIP,omitempty"` - IngressVIP string `yaml:"ingressVIP,omitempty"` - Hosts []Host `yaml:"hosts,omitempty"` - SSHKnownHosts string `yaml:"sshKnownHosts,omitempty"` -} - -// Host is an array of baremetal assets. -type Host struct { - Name string `yaml:"name"` - Role string `yaml:"role"` - Bmc Bmc `yaml:"bmc"` - BootMACAddress string `yaml:"bootMACAddress"` - HardwareProfile string `yaml:"hardwareProfile"` -} - -// Bmc definition. 
-type Bmc struct { - Address string `yaml:"address"` - Username string `yaml:"username"` - Password string `yaml:"password"` -} diff --git a/tests/pkg/utils/kube_debug.go b/tests/pkg/utils/kube_debug.go new file mode 100644 index 000000000..14471825c --- /dev/null +++ b/tests/pkg/utils/kube_debug.go @@ -0,0 +1,434 @@ +// Copyright (c) Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project +// Licensed under the Apache License 2.0 + +package utils + +import ( + "context" + "encoding/json" + "fmt" + "os" + "strings" + "text/tabwriter" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" +) + +// LogFailingTestStandardDebugInfo logs standard debug info for failing tests. +// It scans workloads and pods from hub and managed clusters observability namespaces. +// It also prints MCO and OBA objects. +// If a workload or pod is not running, it prints the resource spec, status, events and logs if appropriate. +func LogFailingTestStandardDebugInfo(opt TestOptions) { + klog.V(1).Infof("Test failed, printing debug info. TestOptions: %+v", opt) + + // Print MCO object + hubDynClient := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + PrintObject(context.TODO(), hubDynClient, NewMCOGVRV1BETA2(), MCO_NAMESPACE, MCO_CR_NAME) + + // Check pods in hub + hubClient := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + CheckPodsInNamespace(hubClient, "open-cluster-management", []string{"multicluster-observability-operator"}, map[string]string{ + "name": "multicluster-observability-operator", + }) + CheckDeploymentsInNamespace(hubClient, MCO_NAMESPACE) + CheckStatefulSetsInNamespace(hubClient, MCO_NAMESPACE) + CheckDaemonSetsInNamespace(hubClient, MCO_NAMESPACE) + CheckPodsInNamespace(hubClient, MCO_NAMESPACE, []string{}, map[string]string{}) + printConfigMapsInNamespace(hubClient, MCO_NAMESPACE) + printSecretsInNamespace(hubClient, MCO_NAMESPACE) + + for _, mc := range opt.ManagedClusters { + if mc.Name == "local-cluster" { + // Skip local-cluster: it shares the hub namespace and was already checked above + continue + } + + spokeDynClient := NewKubeClientDynamic(mc.ClusterServerURL, mc.KubeConfig, mc.KubeContext) + PrintObject(context.TODO(), spokeDynClient, NewMCOAddonGVR(), MCO_ADDON_NAMESPACE, "observability-addon") + + spokeClient := NewKubeClient(mc.ClusterServerURL, mc.KubeConfig, mc.KubeContext) + CheckDeploymentsInNamespace(spokeClient, MCO_ADDON_NAMESPACE) + CheckStatefulSetsInNamespace(spokeClient, MCO_ADDON_NAMESPACE) + CheckDaemonSetsInNamespace(spokeClient, MCO_ADDON_NAMESPACE) + CheckPodsInNamespace(spokeClient, MCO_ADDON_NAMESPACE, []string{"observability-addon"}, map[string]string{}) + printConfigMapsInNamespace(spokeClient, MCO_ADDON_NAMESPACE) + printSecretsInNamespace(spokeClient, MCO_ADDON_NAMESPACE) + } +} + +// CheckPodsInNamespace lists pods in a namespace and logs debug info (status, events, logs) for pods not running.
+func CheckPodsInNamespace(client kubernetes.Interface, ns string, forcePodNamesLog []string, podLabels map[string]string) { + listOptions := metav1.ListOptions{} + if len(podLabels) > 0 { + listOptions.LabelSelector = metav1.FormatLabelSelector(&metav1.LabelSelector{MatchLabels: podLabels}) + } + pods, err := client.CoreV1().Pods(ns).List(context.TODO(), listOptions) + if err != nil { + klog.Errorf("Failed to get pods in namespace %s: %v", ns, err) + return + } + + if len(pods.Items) == 0 { + klog.V(1).Infof("No pods in namespace %s", ns) + } + + klog.V(1).Infof("Checking %d pods in namespace %q", len(pods.Items), ns) + printPodsStatuses(pods.Items) + + notRunningPodsCount := 0 + for _, pod := range pods.Items { + if pod.Status.Phase != corev1.PodRunning { + notRunningPodsCount++ + } + + force := false + for _, forcePodName := range forcePodNamesLog { + if strings.Contains(pod.Name, forcePodName) { + force = true + break + } + } + if pod.Status.Phase == corev1.PodRunning && !force { + continue + } + + // Print pod spec + podSpec, err := json.MarshalIndent(pod.Spec, "", " ") + if err != nil { + klog.Errorf("Failed to marshal pod %q spec: %s", pod.Name, err.Error()) + } + klog.V(1).Infof("Pod %q spec: \n%s", pod.Name, string(podSpec)) + + LogPodStatus(pod) + LogObjectEvents(client, ns, "Pod", pod.Name) + LogPodLogs(client, ns, pod) + } + + if notRunningPodsCount == 0 { + klog.V(1).Infof("All pods are running in namespace %q", ns) + } else { + klog.Errorf("Found %d pods not running in namespace %q", notRunningPodsCount, ns) + } +} + +func LogPodStatus(pod corev1.Pod) { + var podStatus strings.Builder + podStatus.WriteString(">>>>>>>>>> pod status >>>>>>>>>>\n") + podStatus.WriteString("Conditions:\n") + for _, condition := range pod.Status.Conditions { + podStatus.WriteString(fmt.Sprintf("\t%s: %s %v\n", condition.Type, condition.Status, condition.LastTransitionTime.Time)) + } + podStatus.WriteString("ContainerStatuses:\n") + for _, containerStatus := range pod.Status.ContainerStatuses { + podStatus.WriteString(fmt.Sprintf("\t%s: %t %d %v\n", containerStatus.Name, containerStatus.Ready, containerStatus.RestartCount, containerStatus.State)) + if containerStatus.LastTerminationState.Terminated != nil { + podStatus.WriteString(fmt.Sprintf("\t\tlastTerminated: %v\n", containerStatus.LastTerminationState.Terminated)) + } + } + podStatus.WriteString("<<<<<<<<<< pod status <<<<<<<<<<") + + klog.V(1).Infof("Pod %q is in phase %q and status: \n%s", pod.Name, pod.Status.Phase, podStatus.String()) +} + +func LogPodLogs(client kubernetes.Interface, ns string, pod corev1.Pod) { + for _, container := range pod.Spec.Containers { + logsRes := client.CoreV1().Pods(ns).GetLogs(pod.Name, &corev1.PodLogOptions{ + Container: container.Name, + }).Do(context.Background()) + + if logsRes.Error() != nil { + klog.Errorf("Failed to get logs for pod %q: %s", pod.Name, logsRes.Error()) + continue + } + + logs, err := logsRes.Raw() + if err != nil { + klog.Errorf("Failed to get logs for pod %q container %q: %s", pod.Name, container.Name, err.Error()) + continue + } + + // Keep lines that mention errors, plus the last 100 lines of the log + maxLines := 100 + cleanedLines := []string{} + lines := strings.Split(string(logs), "\n") + for i, line := range lines { + if strings.Contains(strings.ToLower(line), "error") || i >= len(lines)-maxLines { + cleanedLines = append(cleanedLines, line) + } + } + + logs = []byte(strings.Join(cleanedLines, "\n")) + + delimitedLogs := fmt.Sprintf(">>>>>>>>>> container logs >>>>>>>>>>\n%s<<<<<<<<<< container logs <<<<<<<<<<", string(logs)) + klog.V(1).Infof("Pod %q container %q logs (errors and last %d lines): \n%s", pod.Name, container.Name, maxLines, delimitedLogs) + } +}
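The tail-plus-errors selection above is the only subtle part of LogPodLogs. A small self-contained sketch of the same rule, with maxLines shrunk so the behaviour is easy to see (an illustration only, not code from this commit):

package main

import (
	"fmt"
	"strings"
)

// keepErrorsAndTail mirrors LogPodLogs' filter: keep every line that
// mentions "error" (case-insensitive), plus the last maxLines lines.
func keepErrorsAndTail(log string, maxLines int) []string {
	lines := strings.Split(log, "\n")
	var kept []string
	for i, line := range lines {
		if strings.Contains(strings.ToLower(line), "error") || i >= len(lines)-maxLines {
			kept = append(kept, line)
		}
	}
	return kept
}

func main() {
	log := "starting\nERROR: sync failed\nok\nok\nshutting down"
	// Keeps "ERROR: sync failed" plus the last 3 lines: ok, ok, shutting down.
	fmt.Println(keepErrorsAndTail(log, 3))
}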
+ +func CheckDeploymentsInNamespace(client kubernetes.Interface, ns string) { + deployments, err := client.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + klog.Errorf("Failed to get deployments in namespace %s: %v", ns, err) + return + } + + if len(deployments.Items) == 0 { + klog.V(1).Infof("No deployments found in namespace %q", ns) + } + + klog.V(1).Infof("Deployments in namespace %s: \n", ns) + printDeploymentsStatuses(client, ns) + + for _, deployment := range deployments.Items { + if deployment.Status.UpdatedReplicas == *deployment.Spec.Replicas { + continue + } + + // Print deployment spec + deploymentSpec, err := json.MarshalIndent(deployment.Spec, "", " ") + if err != nil { + klog.Errorf("Failed to marshal deployment %q spec: %s", deployment.Name, err.Error()) + } + klog.V(1).Infof("Deployment %q spec: \n%s", deployment.Name, string(deploymentSpec)) + + LogDeploymentStatus(deployment) + LogObjectEvents(client, ns, "Deployment", deployment.Name) + } +} + +func LogDeploymentStatus(deployment appsv1.Deployment) { + var deploymentStatus strings.Builder + deploymentStatus.WriteString(">>>>>>>>>> deployment status >>>>>>>>>>\n") + deploymentStatus.WriteString(fmt.Sprintf("ReadyReplicas: %d\n", deployment.Status.ReadyReplicas)) + deploymentStatus.WriteString(fmt.Sprintf("UpdatedReplicas: %d\n", deployment.Status.UpdatedReplicas)) + deploymentStatus.WriteString(fmt.Sprintf("AvailableReplicas: %d\n", deployment.Status.AvailableReplicas)) + deploymentStatus.WriteString("Conditions:\n") + for _, condition := range deployment.Status.Conditions { + deploymentStatus.WriteString(fmt.Sprintf("\t%s: %s %v \n\t\t%s %s\n", condition.Type, condition.Status, condition.LastTransitionTime, condition.Message, condition.Reason)) + } + deploymentStatus.WriteString("<<<<<<<<<< deployment status <<<<<<<<<<") + + klog.V(1).Infof("Deployment %q status: \n%s", deployment.Name, deploymentStatus.String()) +} + +func CheckStatefulSetsInNamespace(client kubernetes.Interface, ns string) { + statefulSets, err := client.AppsV1().StatefulSets(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + klog.Errorf("Failed to get statefulsets in namespace %s: %v", ns, err) + return + } + + if len(statefulSets.Items) == 0 { + klog.V(1).Infof("No statefulsets found in namespace %q", ns) + return + } + + klog.V(1).Infof("StatefulSets in namespace %s: \n", ns) + printStatefulSetsStatuses(client, ns) + + for _, statefulSet := range statefulSets.Items { + if statefulSet.Status.UpdatedReplicas == *statefulSet.Spec.Replicas { + continue + } + + // Print statefulset spec + statefulSetSpec, err := json.MarshalIndent(statefulSet.Spec, "", " ") + if err != nil { + klog.Errorf("Failed to marshal statefulset %q spec: %s", statefulSet.Name, err.Error()) + } + klog.V(1).Infof("StatefulSet %q spec: \n%s", statefulSet.Name, string(statefulSetSpec)) + + LogObjectEvents(client, ns, "StatefulSet", statefulSet.Name) + } +} + +func CheckDaemonSetsInNamespace(client kubernetes.Interface, ns string) { + daemonSets, err := client.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + klog.Errorf("Failed to get daemonsets in namespace %s: %v", ns, err) + return + } + + if len(daemonSets.Items) == 0 { + klog.V(1).Infof("No daemonsets found in namespace %q", ns) + return
+ } + + klog.V(1).Infof("DaemonSets in namespace %s: \n", ns) + printDaemonSetsStatuses(client, ns) + + for _, daemonSet := range daemonSets.Items { + if daemonSet.Status.UpdatedNumberScheduled == daemonSet.Status.DesiredNumberScheduled { + continue + } + + // Print daemonset spec + daemonSetSpec, err := json.MarshalIndent(daemonSet.Spec, "", " ") + if err != nil { + klog.Errorf("Failed to marshal daemonset %q spec: %s", daemonSet.Name, err.Error()) + } + klog.V(1).Infof("DaemonSet %q spec: \n%s", daemonSet.Name, string(daemonSetSpec)) + + LogObjectEvents(client, ns, "DaemonSet", daemonSet.Name) + } +} + +func LogObjectEvents(client kubernetes.Interface, ns string, kind string, name string) { + fieldSelector := fmt.Sprintf("involvedObject.kind=%s,involvedObject.name=%s", kind, name) + events, err := client.CoreV1().Events(ns).List(context.TODO(), metav1.ListOptions{ + FieldSelector: fieldSelector, + }) + if err != nil { + klog.Errorf("Failed to get events for %s %s: %s", kind, name, err.Error()) + return + } + + objectEvents := make([]string, 0, len(events.Items)) + for _, event := range events.Items { + objectEvents = append(objectEvents, fmt.Sprintf("%s %s (%d): %s", event.Reason, event.LastTimestamp, event.Count, event.Message)) + } + formattedEvents := fmt.Sprintf(">>>>>>>>>> %s events >>>>>>>>>>\n%s\n<<<<<<<<<< %s events <<<<<<<<<<", kind, strings.Join(objectEvents, "\n"), kind) + klog.V(1).Infof("%s %q events: \n%s", kind, name, formattedEvents) +} + +func printPodsStatuses(pods []corev1.Pod) { + writer := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(writer, "NAME\tSTATUS\tRESTARTS\tAGE") + for _, pod := range pods { + var restartCount int32 + if len(pod.Status.ContainerStatuses) > 0 { + restartCount = pod.Status.ContainerStatuses[0].RestartCount + } + age := time.Since(pod.CreationTimestamp.Time).Round(time.Second) + fmt.Fprintf(writer, "%s\t%s\t%d\t%s\n", + pod.Name, + pod.Status.Phase, + restartCount, + age) + } + writer.Flush() +} + +func printDeploymentsStatuses(clientset kubernetes.Interface, namespace string) { + deploymentsClient := clientset.AppsV1().Deployments(namespace) + deployments, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + panic(err.Error()) + } + + writer := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(writer, "NAME\tREADY\tUP-TO-DATE\tAVAILABLE\tAGE") + for _, deployment := range deployments.Items { + ready := fmt.Sprintf("%d/%d", deployment.Status.ReadyReplicas, *deployment.Spec.Replicas) + age := time.Since(deployment.CreationTimestamp.Time).Round(time.Second) + fmt.Fprintf(writer, "%s\t%s\t%d\t%d\t%s\n", + deployment.Name, + ready, + deployment.Status.UpdatedReplicas, + deployment.Status.AvailableReplicas, + age) + } + writer.Flush() +} + +func printStatefulSetsStatuses(clientset kubernetes.Interface, namespace string) { + statefulSetsClient := clientset.AppsV1().StatefulSets(namespace) + statefulSets, err := statefulSetsClient.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + panic(err.Error()) + } + + writer := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(writer, "NAME\tREADY\tAGE") + for _, statefulSet := range statefulSets.Items { + ready := fmt.Sprintf("%d/%d", statefulSet.Status.ReadyReplicas, *statefulSet.Spec.Replicas) + age := time.Since(statefulSet.CreationTimestamp.Time).Round(time.Second) + fmt.Fprintf(writer, "%s\t%s\t%s\n", + statefulSet.Name, + ready, + age) + } + writer.Flush() +} + +func printDaemonSetsStatuses(clientset 
kubernetes.Interface, namespace string) { + daemonSetsClient := clientset.AppsV1().DaemonSets(namespace) + daemonSets, err := daemonSetsClient.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + panic(err.Error()) + } + + writer := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(writer, "NAME\tDESIRED\tCURRENT\tREADY\tAGE") + for _, daemonSet := range daemonSets.Items { + age := time.Since(daemonSet.CreationTimestamp.Time).Round(time.Second) + fmt.Fprintf(writer, "%s\t%d\t%d\t%d\t%s\n", + daemonSet.Name, + daemonSet.Status.DesiredNumberScheduled, + daemonSet.Status.CurrentNumberScheduled, + daemonSet.Status.NumberReady, + age) + } + writer.Flush() +} + +func printConfigMapsInNamespace(client kubernetes.Interface, ns string) { + configMaps, err := client.CoreV1().ConfigMaps(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + klog.Errorf("Failed to get configmaps in namespace %q: %v", ns, err) + return + } + + if len(configMaps.Items) == 0 { + klog.V(1).Infof("No configmaps found in namespace %q", ns) + return + } + + klog.V(1).Infof("ConfigMaps in namespace %s: \n", ns) + writer := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(writer, "NAME\tDATA\tAGE") + for _, configMap := range configMaps.Items { + age := time.Since(configMap.CreationTimestamp.Time).Round(time.Second) + fmt.Fprintf(writer, "%s\t%d\t%s\n", + configMap.Name, + len(configMap.Data), + age) + } + writer.Flush() +} + +func printSecretsInNamespace(client kubernetes.Interface, ns string) { + secrets, err := client.CoreV1().Secrets(ns).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + klog.Errorf("Failed to get secrets in namespace %q: %v", ns, err) + return + } + + if len(secrets.Items) == 0 { + klog.V(1).Infof("No secrets found in namespace %q", ns) + return + } + + writer := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(writer, "NAME\tTYPE\tDATA\tAGE") + for _, secret := range secrets.Items { + age := time.Since(secret.CreationTimestamp.Time).Round(time.Second) + fmt.Fprintf(writer, "%s\t%s\t%d\t%s\n", + secret.Name, + secret.Type, + len(secret.Data), + age) + } + writer.Flush() +} diff --git a/tests/pkg/utils/mco_configmaps.go b/tests/pkg/utils/mco_configmaps.go index 7f07c6212..24e81a5a1 100644 --- a/tests/pkg/utils/mco_configmaps.go +++ b/tests/pkg/utils/mco_configmaps.go @@ -8,7 +8,6 @@ import ( "context" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" ) @@ -38,7 +37,7 @@ func CreateConfigMap(opt TestOptions, isHub bool, cm *corev1.ConfigMap) error { return err } -func GetConfigMap(clusterConfig Cluster, isHub bool, name string, +func GetConfigMap(opt TestOptions, isHub bool, name string, namespace string) (error, *corev1.ConfigMap) { clientKube := getKubeClientForCluster(clusterConfig, isHub) cm, err := clientKube.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index fdc4719cb..0b3ddd26f 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/yaml" - "k8s.io/client-go/kubernetes" + "k8s.io/client-go/dynamic" "k8s.io/klog" ) @@ -101,26 +101,6 @@ func NewOCMMultiClusterHubGVR() schema.GroupVersionResource { Resource: "multiclusterhubs"} } -func 
ModifyMCOAvailabilityConfig(opt TestOptions, availabilityConfig string) error { - clientDynamic := NewKubeClientDynamic( - opt.HubCluster.ClusterServerURL, - opt.KubeConfig, - opt.HubCluster.KubeContext) - - mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if getErr != nil { - return getErr - } - - spec := mco.Object["spec"].(map[string]interface{}) - spec["availabilityConfig"] = availabilityConfig - _, updateErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) - if updateErr != nil { - return updateErr - } - return nil -} - func GetAllMCOPods(opt TestOptions) ([]corev1.Pod, error) { hubClient := NewKubeClient( opt.HubCluster.ClusterServerURL, @@ -149,183 +129,32 @@ func GetAllMCOPods(opt TestOptions) ([]corev1.Pod, error) { return mcoPods, nil } -func PrintAllMCOPodsStatus(opt TestOptions) { - podList, err := GetAllMCOPods(opt) - if err != nil { - klog.Errorf("Failed to get all MCO pods") - } - - if len(podList) == 0 { - klog.V(1).Infof("Failed to get pod in %q namespace", MCO_NAMESPACE) - } - - hubClient := NewKubeClient( - opt.HubCluster.ClusterServerURL, - opt.KubeConfig, - opt.HubCluster.KubeContext) - - // Print mch-image-manifest configmap - mchImageManifestCM, err := ReadImageManifestConfigMap(hubClient) - if err != nil { - klog.Errorf("Failed to get mch-image-manifest configmap: %s", err.Error()) - } else { - klog.V(1).Infof("mch-image-manifest configmap: %v", mchImageManifestCM) - } - - LogPodsDebugInfo(hubClient, podList, false) -} - -func LogPodsDebugInfo(hubClient kubernetes.Interface, pods []corev1.Pod, force bool) { - if len(pods) == 0 { +func PrintObject(ctx context.Context, client dynamic.Interface, gvr schema.GroupVersionResource, ns, name string) { + if ns == "" || name == "" { + klog.V(1).Info("Namespace or name cannot be empty") return } - ns := pods[0].Namespace - podsNames := make([]string, 0, len(pods)) - for _, pod := range pods { - podsNames = append(podsNames, pod.Name) - } - - klog.V(1).Infof("Checking pods %v in namespace %q", podsNames, ns) - notRunningPodsCount := 0 - for _, pod := range pods { - if pod.Status.Phase != corev1.PodRunning { - notRunningPodsCount++ - } - - if pod.Status.Phase == corev1.PodRunning && !force { - continue - } - - klog.V(1).Infof("Pod %q is in phase %q and status: %s\n", - pod.Name, - pod.Status.Phase, - pod.Status.String()) - - // print pod events - events, err := hubClient.CoreV1().Events(ns).List(context.TODO(), metav1.ListOptions{ - FieldSelector: "involvedObject.name=" + pod.Name, - }) - if err != nil { - klog.Errorf("Failed to get events for pod %s: %s", pod.Name, err.Error()) - } - - podEvents := make([]string, 0, len(events.Items)) - for _, event := range events.Items { - podEvents = append(podEvents, fmt.Sprintf("%s %s (%d): %s", event.Reason, event.LastTimestamp, event.Count, event.Message)) - } - formattedEvents := ">>>>>>>>>> pod events >>>>>>>>>>\n" + strings.Join(podEvents, "\n") + "\n<<<<<<<<<< pod events <<<<<<<<<<" - klog.V(1).Infof("Pod %q events: \n%s", pod.Name, formattedEvents) - - // print pod containers logs - for _, container := range pod.Spec.Containers { - logsRes := hubClient.CoreV1().Pods(ns).GetLogs(pod.Name, &corev1.PodLogOptions{ - Container: container.Name, - }).Do(context.Background()) - - if logsRes.Error() != nil { - klog.Errorf("Failed to get logs for pod %q: %s", pod.Name, logsRes.Error()) - continue - } - - logs, err := logsRes.Raw() - if err != nil { - klog.Errorf("Failed to get 
logs for pod %q container %q: %s", pod.Name, container.Name, err.Error()) - continue - } - - delimitedLogs := fmt.Sprintf(">>>>>>>>>> container logs >>>>>>>>>>\n%s<<<<<<<<<< container logs <<<<<<<<<<", string(logs)) - klog.V(1).Infof("Pod %q container %q logs: \n%s", pod.Name, container.Name, delimitedLogs) - } - } - - if notRunningPodsCount == 0 { - klog.V(1).Infof("All pods are running in namespace %q", ns) - } -} - -// ReadImageManifestConfigMap reads configmap with the label ocm-configmap-type=image-manifest. -func ReadImageManifestConfigMap(c kubernetes.Interface) (map[string]string, error) { - listOpts := metav1.ListOptions{ - LabelSelector: "ocm-configmap-type=image-manifest", - } - - imageCMList, err := c.CoreV1().ConfigMaps("open-cluster-management").List(context.TODO(), listOpts) + obj, err := client.Resource(gvr).Namespace(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("failed to list mch-image-manifest configmaps: %w", err) - } - - if len(imageCMList.Items) != 1 { - return nil, fmt.Errorf("found %d mch-image-manifest configmaps, expected 1", len(imageCMList.Items)) - } - - return imageCMList.Items[0].Data, nil -} - -func PrintMCOObject(opt TestOptions) { - clientDynamic := NewKubeClientDynamic( - opt.HubCluster.ClusterServerURL, - opt.KubeConfig, - opt.HubCluster.KubeContext) - mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if getErr != nil { - klog.V(1).Infof("Failed to get mco object") + klog.V(1).Infof("Failed to get object %s in namespace %s: %v", name, ns, err) return } - spec, _ := json.MarshalIndent(mco.Object["spec"], "", " ") - status, _ := json.MarshalIndent(mco.Object["status"], "", " ") - klog.V(1).Infof("MCO spec: %+v\n", string(spec)) - klog.V(1).Infof("MCO status: %+v\n", string(status)) -} - -func PrintManagedClusterOBAObject(opt TestOptions) { - clientDynamic := GetKubeClientDynamic(opt, false) - oba, getErr := clientDynamic.Resource(NewMCOAddonGVR()). - Namespace(MCO_ADDON_NAMESPACE). 
- Get(context.TODO(), "observability-addon", metav1.GetOptions{}) - if getErr != nil { - klog.V(1).Infof("Failed to get oba object from managedcluster") - return - } - - spec, _ := json.MarshalIndent(oba.Object["spec"], "", " ") - status, _ := json.MarshalIndent(oba.Object["status"], "", " ") - klog.V(1).Infof("OBA spec: %+v\n", string(spec)) - klog.V(1).Infof("OBA status: %+v\n", string(status)) -} - -func GetAllOBAPods(opt TestOptions) ([]corev1.Pod, error) { - clientKube := getKubeClient(opt, false) - obaPods, err := clientKube.CoreV1().Pods(MCO_ADDON_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + spec, err := json.MarshalIndent(obj.Object["spec"], "", " ") if err != nil { - return []corev1.Pod{}, err - } - - return obaPods.Items, nil -} - -func PrintAllOBAPodsStatus(opt TestOptions) { - if GetManagedClusterName(opt) == "local-cluster" { - klog.V(1).Infof("Skip printing OBA pods status for local-cluster") - return - } - podList, err := GetAllOBAPods(opt) - if err != nil { - klog.Errorf("Failed to get all OBA pods: %v", err) + klog.V(1).Infof("Failed to marshal spec for object %s in namespace %s: %v", name, ns, err) return } - klog.V(1).Infof("Get <%v> pods in <%s> namespace from managedcluster", len(podList), MCO_ADDON_NAMESPACE) - if len(podList) == 0 { + status, err := json.MarshalIndent(obj.Object["status"], "", " ") + if err != nil { + klog.V(1).Infof("Failed to marshal status for object %s in namespace %s: %v", name, ns, err) return } - force := false - if len(podList) == 1 { // only the operator is up - force = true - } - LogPodsDebugInfo(getKubeClient(opt, false), podList, force) + klog.V(1).Infof("Object %s/%s/%s spec: %+v\n", ns, gvr.Resource, name, string(spec)) + klog.V(1).Infof("Object %s/%s/%s status: %+v\n", ns, gvr.Resource, name, string(status)) } func CheckAllPodNodeSelector(opt TestOptions, nodeSelector map[string]interface{}) error { @@ -617,43 +446,6 @@ func RevertMCOCRModification(opt TestOptions) error { return nil } -func CheckMCOAddon(opt TestOptions) error { - client := NewKubeClient( - opt.HubCluster.ClusterServerURL, - opt.KubeConfig, - opt.HubCluster.KubeContext) - if len(opt.ManagedClusters) > 0 { - client = NewKubeClient( - opt.ManagedClusters[0].ClusterServerURL, - opt.ManagedClusters[0].KubeConfig, - "") - } - expectedPodNames := []string{ - "endpoint-observability-operator", - "metrics-collector-deployment", - } - podList, err := client.CoreV1().Pods(MCO_ADDON_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return err - } - podsn := make(map[string]corev1.PodPhase) - for _, pod := range podList.Items { - podsn[pod.Name] = pod.Status.Phase - } - for _, podName := range expectedPodNames { - exist := false - for key, value := range podsn { - if strings.HasPrefix(key, podName) && value == "Running" { - exist = true - } - } - if !exist { - return errors.New(podName + " not found") - } - } - return nil -} - func CheckMCOAddonResources(opt TestOptions) error { client := NewKubeClient( opt.HubCluster.ClusterServerURL, @@ -696,43 +488,6 @@ func CheckMCOAddonResources(opt TestOptions) error { return nil } -func ModifyMCORetentionResolutionRaw(opt TestOptions) error { - clientDynamic := NewKubeClientDynamic( - opt.HubCluster.ClusterServerURL, - opt.KubeConfig, - opt.HubCluster.KubeContext) - mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if getErr != nil { - return getErr - } - - spec := mco.Object["spec"].(map[string]interface{}) - advRetentionCon, _ := 
CheckAdvRetentionConfig(opt) - if advRetentionCon { - retentionConfig := spec["advanced"].(map[string]interface{})["retentionConfig"].(map[string]interface{}) - retentionConfig["retentionResolutionRaw"] = "3d" - } - _, updateErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) - if updateErr != nil { - return updateErr - } - return nil -} - -func GetMCOAddonSpecMetrics(opt TestOptions) (bool, error) { - clientDynamic := NewKubeClientDynamic( - opt.HubCluster.ClusterServerURL, - opt.KubeConfig, - opt.HubCluster.KubeContext) - mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if getErr != nil { - return false, getErr - } - - enable := mco.Object["spec"].(map[string]interface{})["observabilityAddonSpec"].(map[string]interface{})["enableMetrics"].(bool) - return enable, nil -} - func ModifyMCOAddonSpecMetrics(opt TestOptions, enable bool) error { clientDynamic := NewKubeClientDynamic( opt.HubCluster.ClusterServerURL, diff --git a/tests/pkg/utils/mco_metric.go b/tests/pkg/utils/mco_metric.go index 16318114c..ac903ca76 100644 --- a/tests/pkg/utils/mco_metric.go +++ b/tests/pkg/utils/mco_metric.go @@ -8,7 +8,6 @@ import ( "bufio" "context" "crypto/tls" - "errors" "fmt" "io" "net/http" @@ -22,17 +21,54 @@ import ( "k8s.io/klog" ) -func ContainManagedClusterMetric(opt TestOptions, query string, matchedLabels []string) (error, bool) { +type GrafanaResponse struct { + Status string `json:"status"` + Data struct { + ResultType string `json:"resultType"` + Result []struct { + Metric map[string]string `json:"metric"` + Value []interface{} `json:"value"` // Use interface{} because value can be mixed types + } `json:"result"` + } `json:"data"` +} + +func (r GrafanaResponse) ContainsLabelsSet(labels map[string]string) bool { + ret := false +loop: + for _, result := range r.Data.Result { + for key, val := range labels { + if result.Metric[key] != val { + continue loop + } + } + ret = true + break + } + + return ret +} + +func (r GrafanaResponse) String() string { + var ret strings.Builder + ret.WriteString(fmt.Sprintf("Status: %s\n", r.Status)) + ret.WriteString(fmt.Sprintf("ResultType: %s\n", r.Data.ResultType)) + ret.WriteString("Result:\n") + for _, result := range r.Data.Result { + ret.WriteString(fmt.Sprintf("%v %v\n", result.Metric, result.Value)) + } + return ret.String() +} + +func QueryGrafana(opt TestOptions, query string) (*GrafanaResponse, error) { grafanaConsoleURL := GetGrafanaURL(opt) path := "/api/datasources/proxy/1/api/v1/query?" 
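+ // Illustration (hypothetical host): with a Grafana console URL of
+ // https://grafana.example.com and query "up", the request assembled below is
+ // https://grafana.example.com/api/datasources/proxy/1/api/v1/query?query=up,
+ // i.e. the PromQL query is proxied through Grafana datasource 1.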
queryParams := url.PathEscape(fmt.Sprintf("query=%s", query)) - klog.V(5).Infof("request url is: %s\n", grafanaConsoleURL+path+queryParams) req, err := http.NewRequest( "GET", grafanaConsoleURL+path+queryParams, nil) if err != nil { - return err, false + return nil, err } client := &http.Client{} @@ -45,7 +81,7 @@ func ContainManagedClusterMetric(opt TestOptions, query string, matchedLabels [] client = &http.Client{Transport: tr} token, err := FetchBearerToken(opt) if err != nil { - return err, false + return nil, err } if token != "" { req.Header.Set("Authorization", "Bearer "+token) @@ -55,41 +91,29 @@ func ContainManagedClusterMetric(opt TestOptions, query string, matchedLabels [] resp, err := client.Do(req) if err != nil { - return err, false + return nil, err } if resp.StatusCode != http.StatusOK { - klog.Errorf("resp: %+v\n", resp) - klog.Errorf("err: %+v\n", err) - return fmt.Errorf("failed to access managed cluster metrics via grafana console: %s", query), false + return nil, fmt.Errorf("failed to access managed cluster metrics via grafana console, status code: %d", resp.StatusCode) } - metricResult, err := io.ReadAll(resp.Body) - klog.V(5).Infof("metricResult: %s\n", metricResult) + respBody, err := io.ReadAll(resp.Body) if err != nil { - return err, false - } - - if !strings.Contains(string(metricResult), `"status":"success"`) { - return errors.New("failed to find valid status from response"), false + return nil, fmt.Errorf("failed to read response body: %v", err) } - if strings.Contains(string(metricResult), `"result":[]`) { - return errors.New("failed to find metric name from response"), false + metricResult := GrafanaResponse{} + err = yaml.Unmarshal(respBody, &metricResult) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal response body: %v", err) } - contained := true - for _, label := range matchedLabels { - if !strings.Contains(string(metricResult), label) { - contained = false - break - } - } - if !contained { - return errors.New("failed to find metric name from response"), false + if metricResult.Status != "success" { + return &metricResult, fmt.Errorf("failed to get metric from response, status: %s", metricResult.Status) } - return nil, true + return &metricResult, nil } type MetricsAllowlist struct { diff --git a/tests/pkg/utils/mco_pods.go b/tests/pkg/utils/mco_pods.go index d3a761324..91b6d721c 100644 --- a/tests/pkg/utils/mco_pods.go +++ b/tests/pkg/utils/mco_pods.go @@ -37,16 +37,6 @@ func GetPodList(opt TestOptions, isHub bool, namespace string, labelSelector str return nil, podList } -func DeletePod(opt TestOptions, isHub bool, namespace, name string) error { - clientKube := getKubeClient(opt, isHub) - err := clientKube.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - klog.Errorf("Failed to delete pod %s in namespace %s due to %v", name, namespace, err) - return err - } - return nil -} - func GetPodLogs( opt TestOptions, isHub bool, diff --git a/tests/pkg/utils/utils.go b/tests/pkg/utils/utils.go index 74748266e..0ad676d58 100644 --- a/tests/pkg/utils/utils.go +++ b/tests/pkg/utils/utils.go @@ -6,7 +6,6 @@ package utils import ( "context" - "encoding/json" "errors" "fmt" "os" @@ -25,9 +24,7 @@ import ( k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/version" 
"k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -36,26 +33,6 @@ import ( "k8s.io/klog" ) -func NewUnversionedRestClient(url, kubeconfig, ctx string) *rest.RESTClient { - klog.V(5).Infof("Create unversionedRestClient for url %s using kubeconfig path %s\n", url, kubeconfig) - config, err := LoadConfig(url, kubeconfig, ctx) - if err != nil { - panic(err) - } - - oldNegotiatedSerializer := config.NegotiatedSerializer - config.NegotiatedSerializer = unstructuredscheme.NewUnstructuredNegotiatedSerializer() - kubeRESTClient, err := rest.UnversionedRESTClientFor(config) - // restore cfg before leaving - defer func(cfg *rest.Config) { cfg.NegotiatedSerializer = oldNegotiatedSerializer }(config) - - if err != nil { - panic(err) - } - - return kubeRESTClient -} - func NewKubeClient(url, kubeconfig, ctx string) kubernetes.Interface { config, err := LoadConfig(url, kubeconfig, ctx) if err != nil { @@ -99,21 +76,6 @@ func NewKubeClientAPIExtension(url, kubeconfig, ctx string) apiextensionsclients return clientset } -// func NewKubeClientDiscovery(url, kubeconfig, ctx string) *discovery.DiscoveryClient { -// klog.V(5).Infof("Create kubeclient discovery for url %s using kubeconfig path %s\n", url, kubeconfig) -// config, err := LoadConfig(url, kubeconfig, ctx) -// if err != nil { -// panic(err) -// } - -// clientset, err := discovery.NewDiscoveryClientForConfig(config) -// if err != nil { -// panic(err) -// } - -// return clientset -// } - func CreateMCOTestingRBAC(opt TestOptions) error { // create new service account and new clusterrolebinding and bind the serviceaccount to cluster-admin clusterrole // then the bearer token can be retrieved from the secret of created serviceaccount @@ -298,13 +260,14 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { apiVersion = v.(string) } + klog.V(5).Infof("Applying kind %q with name %q in namespace %q", kind, obj.GetName(), obj.GetNamespace()) + clientKube := NewKubeClient(url, kubeconfig, ctx) clientAPIExtension := NewKubeClientAPIExtension(url, kubeconfig, ctx) // now use switch over the type of the object // and match each type-case switch kind { case "CustomResourceDefinition": - klog.V(5).Infof("Install CRD: %s\n", f) obj := &apiextensionsv1.CustomResourceDefinition{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -323,7 +286,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientAPIExtension.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), existingObject, metav1.UpdateOptions{}) } case "Namespace": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &corev1.Namespace{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -340,7 +302,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.CoreV1().Namespaces().Update(context.TODO(), existingObject, metav1.UpdateOptions{}) } case "ServiceAccount": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &corev1.ServiceAccount{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -359,7 +320,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.CoreV1().ServiceAccounts(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "ClusterRoleBinding": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &rbacv1.ClusterRoleBinding{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -376,7 +336,6 @@ func Apply(url string, kubeconfig string, ctx 
string, yamlB []byte) error { _, err = clientKube.RbacV1().ClusterRoleBindings().Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "Secret": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &corev1.Secret{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -393,7 +352,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.CoreV1().Secrets(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "ConfigMap": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &corev1.ConfigMap{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -412,7 +370,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.CoreV1().ConfigMaps(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "Service": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &corev1.Service{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -432,7 +389,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.CoreV1().Services(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "PersistentVolumeClaim": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &corev1.PersistentVolumeClaim{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -452,7 +408,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "Deployment": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &appsv1.Deployment{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -471,7 +426,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.AppsV1().Deployments(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "LimitRange": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &corev1.LimitRange{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -490,7 +444,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.CoreV1().LimitRanges(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "ResourceQuota": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &corev1.ResourceQuota{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -509,7 +462,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.CoreV1().ResourceQuotas(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } case "StorageClass": - klog.V(5).Infof("Install %s: %s\n", kind, f) obj := &storagev1.StorageClass{} err = yaml.Unmarshal([]byte(f), obj) if err != nil { @@ -533,13 +485,11 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if apiVersion == "observability.open-cluster-management.io/v1beta1" { gvr = NewMCOGVRV1BETA1() } - klog.V(5).Infof("Install MultiClusterObservability: %s\n", f) case "PrometheusRule": gvr = schema.GroupVersionResource{ Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheusrules"} - klog.V(5).Infof("Install PrometheusRule: %s\n", f) default: return fmt.Errorf("resource %s not supported", kind) } @@ -612,45 +562,6 @@ func StatusContainsTypeEqualTo(u *unstructured.Unstructured, typeString string) return false } -// GetCluster returns the first cluster with a given tag -func GetCluster(tag string, clusters []Cluster) *Cluster { - for _, cluster := range 
clusters { - if tag, ok := cluster.Tags[tag]; ok { - if tag { - return &cluster - } - } - } - return nil -} - -// GetClusters returns all clusters with a given tag -func GetClusters(tag string, clusters []Cluster) []*Cluster { - filteredClusters := make([]*Cluster, 0) - for i, cluster := range clusters { - if tag, ok := cluster.Tags[tag]; ok { - if tag { - filteredClusters = append(filteredClusters, &clusters[i]) - } - } - } - return filteredClusters -} - -func HaveServerResources(c Cluster, kubeconfig string, expectedAPIGroups []string) error { - clientAPIExtension := NewKubeClientAPIExtension(c.ClusterServerURL, kubeconfig, c.KubeContext) - clientDiscovery := clientAPIExtension.Discovery() - for _, apiGroup := range expectedAPIGroups { - klog.V(1).Infof("Check if %s exists", apiGroup) - _, err := clientDiscovery.ServerResourcesForGroupVersion(apiGroup) - if err != nil { - klog.V(1).Infof("Error while retrieving server resource %s: %s", apiGroup, err.Error()) - return err - } - } - return nil -} - func HaveCRDs(c Cluster, kubeconfig string, expectedCRDs []string) error { clientAPIExtension := NewKubeClientAPIExtension(c.ClusterServerURL, kubeconfig, c.KubeContext) clientAPIExtensionV1 := clientAPIExtension.ApiextensionsV1() @@ -665,93 +576,6 @@ func HaveCRDs(c Cluster, kubeconfig string, expectedCRDs []string) error { return nil } -func HaveDeploymentsInNamespace( - c Cluster, - kubeconfig string, - namespace string, - expectedDeploymentNames []string, -) error { - - client := NewKubeClient(c.ClusterServerURL, kubeconfig, c.KubeContext) - versionInfo, err := client.Discovery().ServerVersion() - if err != nil { - return err - } - klog.V(1).Infof("Server version info: %v", versionInfo) - - deployments := client.AppsV1().Deployments(namespace) - - for _, deploymentName := range expectedDeploymentNames { - klog.V(1).Infof("Check if deployment %s exists", deploymentName) - deployment, err := deployments.Get(context.TODO(), deploymentName, metav1.GetOptions{}) - if err != nil { - klog.V(1).Infof("Error while retrieving deployment %s: %s", deploymentName, err.Error()) - return err - } - - if deployment.Status.Replicas != deployment.Status.ReadyReplicas { - err = fmt.Errorf("%s: Expect %d but got %d Ready replicas", - deploymentName, - deployment.Status.Replicas, - deployment.Status.ReadyReplicas) - klog.Errorln(err) - return err - } - - for _, condition := range deployment.Status.Conditions { - if condition.Reason == "MinimumReplicasAvailable" { - if condition.Status != corev1.ConditionTrue { - err = fmt.Errorf("%s: Expect %s but got %s", - deploymentName, - condition.Status, - corev1.ConditionTrue) - klog.Errorln(err) - return err - } - } - } - } - - return nil -} - -func GetKubeVersion(client *rest.RESTClient) version.Info { - kubeVersion := version.Info{} - - versionBody, err := client.Get().AbsPath("/version").Do(context.TODO()).Raw() - if err != nil { - klog.Errorf("fail to GET /version with %v", err) - return version.Info{} - } - - err = json.Unmarshal(versionBody, &kubeVersion) - if err != nil { - klog.Errorf("fail to Unmarshal, got '%s': %v", string(versionBody), err) - return version.Info{} - } - - return kubeVersion -} - -func IsOpenshift(client *rest.RESTClient) bool { - //check whether the cluster is openshift or not for openshift version 3.11 and before - _, err := client.Get().AbsPath("/version/openshift").Do(context.TODO()).Raw() - if err == nil { - klog.V(5).Info("Found openshift version from /version/openshift") - return true - } - - //check whether the cluster is openshift or 
not for openshift version 4.1
- _, err = client.Get().AbsPath("/apis/config.openshift.io/v1/clusterversions").Do(context.TODO()).Raw()
- if err == nil {
- klog.V(5).Info("Found openshift version from /apis/config.openshift.io/v1/clusterversions")
- return true
- }
-
- klog.V(5).Infof("fail to GET openshift version, assuming not OpenShift: %s", err.Error())
- return false
-}
-
 // IntegrityChecking checks to ensure all required conditions are met when completing the specs
 func IntegrityChecking(opt TestOptions) error {
 var err error

From 537c28a37d9f5ba6211d98d2d906a52725788c14 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jacob=20Baung=C3=A5rd=20Hansen?=
Date: Wed, 22 May 2024 13:24:13 +0200
Subject: [PATCH 24/33] Bump MCO memory limit to 3Gi (#1447)

When the memory limit of MCO was bumped from 1Gi to 2Gi in
c3e1e947854f3d711afaeccc814e0951a0158af6 it was updated only in the
generated manifests, and not in the source used to generate said
manifests. As a result, when we ran `make bundle` the limit was
reverted. This was done in e97c825b579398bb68f76c114c62dbec60927ab1
(note this was later partially reverted in
d74277862ebb83f14cdca85644aea486aa7b59a2 but the memory limit was not).
The end result was the limit being lowered from 2Gi to 1Gi again,
causing problems on systems with a large number of managed clusters.

In this commit we raise the limit to 3Gi. 3Gi might seem like a high
limit; however, we occasionally saw OOMs with a 2Gi limit on large
perfscale tests. Further, since 2.9 we no longer have any sensible ways
to increase these limits on systems that need it, so it's better to
have a large margin on the limit for now.

Signed-off-by: Jacob Baungard Hansen
---
 ...lticluster-observability-operator.clusterserviceversion.yaml | 2 +-
 operators/multiclusterobservability/config/manager/manager.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml b/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml
index 2049c7cb1..77508b297 100644
--- a/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml
+++ b/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml
@@ -520,7 +520,7 @@ spec:
 resources:
 limits:
 cpu: 600m
- memory: 1Gi
+ memory: 3Gi
 requests:
 cpu: 100m
 memory: 128Mi
diff --git a/operators/multiclusterobservability/config/manager/manager.yaml b/operators/multiclusterobservability/config/manager/manager.yaml
index c4c0fdfa7..fe4f621c9 100644
--- a/operators/multiclusterobservability/config/manager/manager.yaml
+++ b/operators/multiclusterobservability/config/manager/manager.yaml
@@ -62,7 +62,7 @@ spec:
 resources:
 limits:
 cpu: 600m
- memory: 1024Mi
+ memory: 3Gi
 requests:
 cpu: 100m
 memory: 128Mi

From 2c65c5d03aa48094e333d090f216ecc6eaec1f58 Mon Sep 17 00:00:00 2001
From: Saswata Mukherjee
Date: Wed, 22 May 2024 13:48:59 +0100
Subject: [PATCH 25/33] Expose new promql-engine opt-out option in CR (#1446)

* Expose new promql-engine opt-out option in CR

Signed-off-by: Saswata Mukherjee

* make bundle

Signed-off-by: Saswata Mukherjee

---------

Signed-off-by: Saswata Mukherjee
---
 go.mod | 2 +-
 go.sum | 4 ++--
 .../api/v1beta2/multiclusterobservability_types.go | 4 ++++
 ...en-cluster-management.io_multiclusterobservabilities.yaml | 3 +++
 ...en-cluster-management.io_multiclusterobservabilities.yaml | 
4 ++++ .../controllers/multiclusterobservability/observatorium.go | 5 +++++ 6 files changed, 19 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 67ac77107..94758aea6 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace github.com/stolostron/multiclusterhub-operator v0.0.0-20220902185016-e81ccfbecf55 - github.com/stolostron/observatorium-operator v0.0.0-20240403132649-1f7129fc3a27 + github.com/stolostron/observatorium-operator v0.0.0-20240521155951-b4302d25bd18 github.com/stretchr/testify v1.8.4 github.com/thanos-io/thanos v0.30.0 go.uber.org/zap v1.26.0 diff --git a/go.sum b/go.sum index 6afdecdfb..27314fbbe 100644 --- a/go.sum +++ b/go.sum @@ -1817,8 +1817,8 @@ github.com/stolostron/backplane-operator v0.0.0-20220727154840-1f60baf1fb98 h1:f github.com/stolostron/backplane-operator v0.0.0-20220727154840-1f60baf1fb98/go.mod h1:IGZxghtPz8rJylGtW8XUAQdlqRai2j7aL4ymOINsP/c= github.com/stolostron/multiclusterhub-operator v0.0.0-20220902185016-e81ccfbecf55 h1:sNpuRgbyAEvOjayzShyPNt+Eg34jmJPNIUY9cFvUlwU= github.com/stolostron/multiclusterhub-operator v0.0.0-20220902185016-e81ccfbecf55/go.mod h1:YCJavcWI4f3PV/LbgMNWsYl/oCbH/Fbn4p+Epd9gro0= -github.com/stolostron/observatorium-operator v0.0.0-20240403132649-1f7129fc3a27 h1:21h43ofoLC2hMPKH0fY+oglbxSH4rphoWOjbjiNTUes= -github.com/stolostron/observatorium-operator v0.0.0-20240403132649-1f7129fc3a27/go.mod h1:fFyJt9/dkQ1/4NxiW4CjH4lj7brxGlkA4SscxoLfzYY= +github.com/stolostron/observatorium-operator v0.0.0-20240521155951-b4302d25bd18 h1:E/PNsOae69+k4zpbKfXQYrI14fEQSKPjv+yP8eSHOV4= +github.com/stolostron/observatorium-operator v0.0.0-20240521155951-b4302d25bd18/go.mod h1:fFyJt9/dkQ1/4NxiW4CjH4lj7brxGlkA4SscxoLfzYY= github.com/stolostron/search-v2-operator v0.0.0-20220721051905-143d28ab4f10 h1:USGd9WwtGqAflJ0sY7k41hCO5L5BuYaPElmAsZm/q4M= github.com/stolostron/search-v2-operator v0.0.0-20220721051905-143d28ab4f10/go.mod h1:o73lDVENck8rRBnjt+PmbDer0MyMq2LQ7g8FsqQbQuw= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= diff --git a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go index accea4da1..de03d3b35 100644 --- a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go +++ b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go @@ -108,6 +108,10 @@ type QuerySpec struct { // +optional ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"` + // Set to true to use the old Prometheus engine for PromQL queries. + // +optional + UsePrometheusEngine bool `json:"usePrometheusEngine,omitempty"` + // WARNING: Use only with guidance from Red Hat Support. Using this feature incorrectly can // lead to an unrecoverable state, data loss, or both, which is not covered by Red Hat Support. 
// +optional diff --git a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml index 98d5dc6d7..7c449d53f 100644 --- a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml +++ b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -2092,6 +2092,9 @@ spec: type: string description: Annotations is an unstructured key value map stored with a service account type: object + usePrometheusEngine: + description: Set to true to use the old Prometheus engine for PromQL queries. + type: boolean type: object queryFrontend: description: spec for thanos-query-frontend diff --git a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml index fe9d8b733..180cce973 100644 --- a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml +++ b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -3269,6 +3269,10 @@ spec: description: Annotations is an unstructured key value map stored with a service account type: object + usePrometheusEngine: + description: Set to true to use the old Prometheus engine + for PromQL queries. + type: boolean type: object queryFrontend: description: spec for thanos-query-frontend diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go index 98db22661..482aa3609 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go @@ -884,6 +884,11 @@ func newQuerySpec(mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.QuerySp mco.Spec.AdvancedConfig.Query.Containers != nil { querySpec.Containers = mco.Spec.AdvancedConfig.Query.Containers } + + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.Query != nil && + mco.Spec.AdvancedConfig.Query.UsePrometheusEngine { + querySpec.UsePrometheusEngine = true + } return querySpec } From 5f607dc147c7f69b38d0537f033d9a4529881abd Mon Sep 17 00:00:00 2001 From: Douglas Camata <159076+douglascamata@users.noreply.github.com> Date: Wed, 22 May 2024 15:49:30 +0200 Subject: [PATCH 26/33] [ACM-11543] Do not add custom obs api url to certs SAN (#1441) * Do not add custom obs api url to certs SAN Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Fix unit tests Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Avoid collision with package name Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> * Rename URL.Host to HostPath Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --------- Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com> --- .../multiclusterobservability_shared.go | 11 +++ .../placementrule/hub_info_secret.go | 10 +-- 
.../placementrule/hub_info_secret_test.go | 44 +++++++++++- .../pkg/certificates/certificates.go | 2 +- .../pkg/config/config.go | 28 ++++++-- .../pkg/config/config_test.go | 71 +++++++++++++++++-- 6 files changed, 149 insertions(+), 17 deletions(-) diff --git a/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go b/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go index 9bbf588cd..9d38df4fa 100644 --- a/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go +++ b/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go @@ -20,11 +20,22 @@ import ( // +kubebuilder:validation:MaxLength=2083 type URL string +// Validate validates the underlying URL. func (u URL) Validate() error { _, err := url.Parse(string(u)) return err } +// HostPath returns the URL's host together with its path. +// This also runs a validation of the underlying url. +func (u URL) HostPath() (string, error) { + parsedUrl, err := url.Parse(string(u)) + if err != nil { + return "", err + } + return parsedUrl.Host + parsedUrl.Path, nil +} + // ObservabilityAddonSpec is the spec of observability addon. type ObservabilityAddonSpec struct { // EnableMetrics indicates the observability addon push metrics to hub server. diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go index 29f862681..a32a1bccf 100644 --- a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go +++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go @@ -23,15 +23,15 @@ import ( func generateHubInfoSecret(client client.Client, obsNamespace string, namespace string, ingressCtlCrdExists bool) (*corev1.Secret, error) { - obsApiRouteHost := "" + obsAPIHost := "" alertmanagerEndpoint := "" alertmanagerRouterCA := "" if ingressCtlCrdExists { var err error - obsApiRouteHost, err = config.GetObsAPIHost(context.TODO(), client, obsNamespace) + obsAPIHost, err = config.GetObsAPIExternalHost(context.TODO(), client, obsNamespace) if err != nil { - log.Error(err, "Failed to get the host for observatorium API route") + log.Error(err, "Failed to get the host for Observatorium API host URL") return nil, err } @@ -56,7 +56,7 @@ func generateHubInfoSecret(client client.Client, obsNamespace string, } else { // for KinD support, the managedcluster and hub cluster are assumed in the same cluster, the observatorium-api // will be accessed through k8s service FQDN + port - obsApiRouteHost = config.GetOperandNamePrefix() + "observatorium-api" + "." + config.GetDefaultNamespace() + ".svc.cluster.local:8080" + obsAPIHost = config.GetOperandNamePrefix() + "observatorium-api" + "." + config.GetDefaultNamespace() + ".svc.cluster.local:8080" // if alerting is disabled, do not set alertmanagerEndpoint if !config.IsAlertingDisabled() { alertmanagerEndpoint = config.AlertmanagerServiceName + "." 
+ config.GetDefaultNamespace() + ".svc.cluster.local:9095"
@@ -70,7 +70,7 @@ func generateHubInfoSecret(client client.Client, obsNamespace string,
 }
 
 obsApiURL := url.URL{
- Host: obsApiRouteHost,
+ Host: obsAPIHost,
 Path: operatorconfig.ObservatoriumAPIRemoteWritePath,
 }
 if !obsApiURL.IsAbs() {
diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go
index 15eb94158..51fe9c501 100644
--- a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go
+++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go
@@ -10,6 +10,8 @@ import (
 
 operatorv1 "github.com/openshift/api/operator/v1"
 routev1 "github.com/openshift/api/route/v1"
+ mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared"
+ mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2"
 "gopkg.in/yaml.v2"
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -120,10 +122,34 @@ func newTestAmDefaultCA() *corev1.ConfigMap {
 }
 }
 
+func newMultiClusterObservability() *mcov1beta2.MultiClusterObservability {
+ return &mcov1beta2.MultiClusterObservability{
+ TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"},
+ ObjectMeta: metav1.ObjectMeta{Name: "test"},
+ Spec: mcov1beta2.MultiClusterObservabilitySpec{
+ StorageConfig: &mcov1beta2.StorageConfig{
+ MetricObjectStorage: &mcoshared.PreConfiguredStorage{
+ Key: "test",
+ Name: "test",
+ },
+ AlertmanagerStorageSize: "2Gi",
+ },
+ },
+ }
+}
+
 func TestNewSecret(t *testing.T) {
 initSchema(t)
 
- objs := []runtime.Object{newTestObsApiRoute(), newTestAlertmanagerRoute(), newTestIngressController(), newTestRouteCASecret()}
+ mco := newMultiClusterObservability()
+ config.SetMonitoringCRName(mco.Name)
+ objs := []runtime.Object{
+ newTestObsApiRoute(),
+ newTestAlertmanagerRoute(),
+ newTestIngressController(),
+ newTestRouteCASecret(),
+ mco,
+ }
 c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
 
 hubInfo, err := generateHubInfoSecret(c, mcoNamespace, namespace, true)
@@ -138,6 +164,22 @@ func TestNewSecret(t *testing.T) {
 if !strings.HasPrefix(hub.ObservatoriumAPIEndpoint, "https://test-host") || hub.AlertmanagerEndpoint != "https://"+routeHost || hub.AlertmanagerRouterCA != routerCA {
 t.Fatalf("Wrong content in hub info secret: \ngot: "+hub.ObservatoriumAPIEndpoint+" "+hub.AlertmanagerEndpoint+" "+hub.AlertmanagerRouterCA, clusterName+" "+"https://test-host"+" "+"test-host"+" "+routerCA)
 }
+
+ mco.Spec.AdvancedConfig = &mcov1beta2.AdvancedConfig{CustomObservabilityHubURL: "https://custom-obs", CustomAlertmanagerHubURL: "https://custom-am"}
+ c = fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
+ hubInfo, err = generateHubInfoSecret(c, mcoNamespace, namespace, true)
+ if err != nil {
+ t.Fatalf("Failed to initialize the hub info secret: (%v)", err)
+ }
+ hub = &operatorconfig.HubInfo{}
+ err = yaml.Unmarshal(hubInfo.Data[operatorconfig.HubInfoSecretKey], &hub)
+ if err != nil {
+ t.Fatalf("Failed to unmarshal data in hub info secret (%v)", err)
+ }
+ if !strings.HasPrefix(hub.ObservatoriumAPIEndpoint, "https://custom-obs") || !strings.HasPrefix(hub.AlertmanagerEndpoint, "https://custom-am") || hub.AlertmanagerRouterCA != routerCA {
+ t.Fatalf("Wrong content in hub info secret: \ngot: "+hub.ObservatoriumAPIEndpoint+" "+hub.AlertmanagerEndpoint+" "+hub.AlertmanagerRouterCA, clusterName+" "+"https://custom-obs"+" "+"custom-obs"+" "+routerCA)
+ }
+
 }
 
 func TestNewBYOSecret(t *testing.T) {
diff --git a/operators/multiclusterobservability/pkg/certificates/certificates.go b/operators/multiclusterobservability/pkg/certificates/certificates.go
index d942ae099..ce374f439 100644
--- a/operators/multiclusterobservability/pkg/certificates/certificates.go
+++ b/operators/multiclusterobservability/pkg/certificates/certificates.go
@@ -461,7 +461,7 @@ func pemEncode(cert []byte, key []byte) (*bytes.Buffer, *bytes.Buffer) {
 func getHosts(c client.Client, ingressCtlCrdExists bool) ([]string, error) {
 hosts := []string{config.GetObsAPISvc(config.GetOperandName(config.Observatorium))}
 if ingressCtlCrdExists {
- url, err := config.GetObsAPIHost(context.TODO(), c, config.GetDefaultNamespace())
+ url, err := config.GetObsAPIRouteHost(context.TODO(), c, config.GetDefaultNamespace())
 if err != nil {
 log.Error(err, "Failed to get api route address")
 return nil, err
diff --git a/operators/multiclusterobservability/pkg/config/config.go b/operators/multiclusterobservability/pkg/config/config.go
index 9ad6ec534..0ce7a5b36 100644
--- a/operators/multiclusterobservability/pkg/config/config.go
+++ b/operators/multiclusterobservability/pkg/config/config.go
@@ -480,8 +480,23 @@ func GetDefaultTenantName() string {
 return defaultTenantName
 }
 
-// GetObsAPIHost is used to get the URL for observartium api gateway.
-func GetObsAPIHost(ctx context.Context, client client.Client, namespace string) (string, error) {
+// GetObsAPIRouteHost is used to get the Route's host for the Observatorium API. This doesn't take into consideration
+// the `advanced.customObservabilityHubURL` configuration.
+func GetObsAPIRouteHost(ctx context.Context, client client.Client, namespace string) (string, error) {
+ mco := &observabilityv1beta2.MultiClusterObservability{}
+ err := client.Get(ctx,
+ types.NamespacedName{
+ Name: GetMonitoringCRName(),
+ }, mco)
+ if err != nil && !errors.IsNotFound(err) {
+ return "", err
+ }
+ return GetRouteHost(client, obsAPIGateway, namespace)
+}
+
+// GetObsAPIExternalHost is used to get the frontend URL that should be used to reach the Observatorium API instance.
+// This takes into consideration the `advanced.customObservabilityHubURL` configuration.
+// For example (consistent with the config tests below), a custom URL of
+// "https://custom.base/url" yields "custom.base/url" (host plus path, scheme
+// stripped); when the field is unset, the Route's host is returned instead.
+func GetObsAPIExternalHost(ctx context.Context, client client.Client, namespace string) (string, error) { mco := &observabilityv1beta2.MultiClusterObservability{} err := client.Get(ctx, types.NamespacedName{ @@ -492,11 +507,16 @@ func GetObsAPIHost(ctx context.Context, client client.Client, namespace string) } advancedConfig := mco.Spec.AdvancedConfig if advancedConfig != nil && advancedConfig.CustomObservabilityHubURL != "" { - err := advancedConfig.CustomObservabilityHubURL.Validate() + hubObsUrl := advancedConfig.CustomObservabilityHubURL + err := hubObsUrl.Validate() + if err != nil { + return "", err + } + obsHostPath, err := hubObsUrl.HostPath() if err != nil { return "", err } - return string(advancedConfig.CustomObservabilityHubURL), nil + return obsHostPath, nil } return GetRouteHost(client, obsAPIGateway, namespace) } diff --git a/operators/multiclusterobservability/pkg/config/config_test.go b/operators/multiclusterobservability/pkg/config/config_test.go index 407919465..353a9bc42 100644 --- a/operators/multiclusterobservability/pkg/config/config_test.go +++ b/operators/multiclusterobservability/pkg/config/config_test.go @@ -15,6 +15,7 @@ import ( routev1 "github.com/openshift/api/route/v1" fakeconfigclient "github.com/openshift/client-go/config/clientset/versioned/fake" observatoriumv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" + "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -252,7 +253,7 @@ func TestGetClusterIDFailed(t *testing.T) { } } -func TestGetObsAPIHost(t *testing.T) { +func TestGetObsAPIRouteHost(t *testing.T) { route := &routev1.Route{ ObjectMeta: metav1.ObjectMeta{ Name: obsAPIGateway, @@ -267,12 +268,14 @@ func TestGetObsAPIHost(t *testing.T) { scheme.AddKnownTypes(mcov1beta2.GroupVersion, &mcov1beta2.MultiClusterObservability{}) client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route).Build() - host, _ := GetObsAPIHost(context.TODO(), client, "default") + host, err := GetObsAPIRouteHost(context.TODO(), client, "default") + assert.NoError(t, err) if host == apiServerURL { t.Errorf("Should not get route host in default namespace") } - host, _ = GetObsAPIHost(context.TODO(), client, "test") + host, err = GetObsAPIRouteHost(context.TODO(), client, "test") + assert.NoError(t, err) if host != apiServerURL { t.Errorf("Observatorium api (%v) is not the expected (%v)", host, apiServerURL) } @@ -289,14 +292,70 @@ func TestGetObsAPIHost(t *testing.T) { }, } client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route, mco).Build() - host, _ = GetObsAPIHost(context.TODO(), client, "test") - if host != customBaseURL { + host, err = GetObsAPIRouteHost(context.TODO(), client, "test") + assert.NoError(t, err) + if host != apiServerURL { + t.Errorf("Observatorium api (%v) is not the expected (%v)", host, apiServerURL) + } + + mco.Spec.AdvancedConfig.CustomObservabilityHubURL = "httpa://foob ar.c" + client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route, mco).Build() + host, err = GetObsAPIRouteHost(context.TODO(), client, "test") + assert.NoError(t, err) + if host != apiServerURL { + t.Errorf("Observatorium api (%v) is not the expected (%v)", host, apiServerURL) + } +} + +func TestGetObsAPIExternalHost(t *testing.T) { + route := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: obsAPIGateway, + Namespace: "test", + }, + Spec: routev1.RouteSpec{ + Host: apiServerURL, + }, + } + scheme := 
runtime.NewScheme() + scheme.AddKnownTypes(routev1.GroupVersion, route) + scheme.AddKnownTypes(mcov1beta2.GroupVersion, &mcov1beta2.MultiClusterObservability{}) + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route).Build() + + host, err := GetObsAPIExternalHost(context.TODO(), client, "default") + assert.NoError(t, err) + if host == apiServerURL { + t.Errorf("Should not get route host in default namespace") + } + + host, err = GetObsAPIExternalHost(context.TODO(), client, "test") + assert.NoError(t, err) + if host != apiServerURL { + t.Errorf("Observatorium api (%v) is not the expected (%v)", host, apiServerURL) + } + + customBaseURL := "https://custom.base/url" + expectedHost := "custom.base/url" + mco := &mcov1beta2.MultiClusterObservability{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetMonitoringCRName(), + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + AdvancedConfig: &mcov1beta2.AdvancedConfig{ + CustomObservabilityHubURL: mcoshared.URL(customBaseURL), + }, + }, + } + client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route, mco).Build() + host, err = GetObsAPIExternalHost(context.TODO(), client, "test") + assert.NoError(t, err) + if host != expectedHost { t.Errorf("Observatorium api (%v) is not the expected (%v)", host, customBaseURL) } mco.Spec.AdvancedConfig.CustomObservabilityHubURL = "httpa://foob ar.c" client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(route, mco).Build() - _, err := GetObsAPIHost(context.TODO(), client, "test") + _, err = GetObsAPIExternalHost(context.TODO(), client, "test") if err == nil { t.Errorf("expected error when parsing URL '%v', but got none", mco.Spec.AdvancedConfig.CustomObservabilityHubURL) } From 29ed654ef058cbb23b466e46d49a24a5fdf195ce Mon Sep 17 00:00:00 2001 From: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> Date: Wed, 22 May 2024 16:40:21 +0200 Subject: [PATCH 27/33] Add integration tests to github workflows (#1436) * add github workflow Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> * add build step Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --------- Signed-off-by: Thibault Mange <22740367+thibaultmg@users.noreply.github.com> --- .github/workflows/integration_tests.yaml | 35 + Makefile | 5 + ...bilityaddon_controller_integration_test.go | 38 +- .../testdata/crd/prometheusrule-crd.yaml | 100 --- .../testdata/crd/servicemonitor-crd.json | 780 ------------------ .../status_controller_integration_test.go | 10 +- scripts/install-binaries.sh | 6 + 7 files changed, 91 insertions(+), 883 deletions(-) create mode 100644 .github/workflows/integration_tests.yaml delete mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/testdata/crd/prometheusrule-crd.yaml delete mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/testdata/crd/servicemonitor-crd.json diff --git a/.github/workflows/integration_tests.yaml b/.github/workflows/integration_tests.yaml new file mode 100644 index 000000000..a907d312b --- /dev/null +++ b/.github/workflows/integration_tests.yaml @@ -0,0 +1,35 @@ +name: integration + +on: + push: + branches: + - main + tags: + - "*" + pull_request: + +jobs: + test: + runs-on: ubuntu-latest + name: Integration tests + env: + GOBIN: /tmp/.bin + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: 1.21.x + cache-dependency-path: "**/*.sum" + + - name: Install deps + run: make deps 
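+ # The two steps below assume the repository Makefile: `build` compiles the
+ # operators, and `install-envtest-deps` presumably prints shell export lines
+ # (e.g. for KUBEBUILDER_ASSETS) that `source <(...)` loads into the job
+ # shell so the envtest binaries can be found by the integration suite.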
+ - name: Build + run: make build + + - name: Run tests + run: | + source <(BIN_DIR=$GOBIN make install-envtest-deps) + make integration-test-operators diff --git a/Makefile b/Makefile index b262e4a03..cb30512c2 100644 --- a/Makefile +++ b/Makefile @@ -208,3 +208,8 @@ install-integration-test-deps: install-e2e-test-deps: @mkdir -p $(BIN_DIR) @./scripts/install-binaries.sh install_e2e_tests_deps $(BIN_DIR) + +.PHONY: install-envtest-deps +install-envtest-deps: + @mkdir -p $(BIN_DIR) + @./scripts/install-binaries.sh install_envtest_deps $(BIN_DIR) \ No newline at end of file diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_integration_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_integration_test.go index 81753ed38..cbade412b 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_integration_test.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_integration_test.go @@ -8,6 +8,7 @@ package observabilityendpoint import ( "context" + "os" "path/filepath" "testing" "time" @@ -17,11 +18,15 @@ import ( "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/hypershift" "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/util" oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" kubescheme "k8s.io/client-go/kubernetes/scheme" @@ -64,7 +69,8 @@ func TestIntegrationReconcileHypershift(t *testing.T) { } mgr, err := ctrl.NewManager(testEnv.Config, ctrl.Options{ - Scheme: k8sClient.Scheme(), + Scheme: k8sClient.Scheme(), + MetricsBindAddress: "0", // Avoids port conflict with the default port 8080 }) assert.NoError(t, err) @@ -100,8 +106,15 @@ func TestIntegrationReconcileHypershift(t *testing.T) { // setupTestEnv starts the test environment (etcd and kube api-server). 
func setupTestEnv(t *testing.T) (*envtest.Environment, client.Client) { + rootPath := filepath.Join("..", "..", "..") + crds := readCRDFiles(t, + filepath.Join(rootPath, "multiclusterobservability", "config", "crd", "bases", "observability.open-cluster-management.io_multiclusterobservabilities.yaml"), + filepath.Join(rootPath, "endpointmetrics", "manifests", "prometheus", "crd", "servicemonitor_crd_0_53_1.yaml"), + filepath.Join(rootPath, "endpointmetrics", "manifests", "prometheus", "crd", "prometheusrule_crd_0_53_1.yaml"), + ) testEnv := &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("testdata", "crd"), filepath.Join("..", "..", "config", "crd", "bases")}, + CRDs: crds, } cfg, err := testEnv.Start() @@ -114,6 +127,7 @@ func setupTestEnv(t *testing.T) (*envtest.Environment, client.Client) { hyperv1.AddToScheme(scheme) promv1.AddToScheme(scheme) oav1beta1.AddToScheme(scheme) + mcov1beta2.AddToScheme(scheme) k8sClient, err := client.New(cfg, client.Options{Scheme: scheme}) if err != nil { @@ -128,6 +142,28 @@ func setupTestEnv(t *testing.T) (*envtest.Environment, client.Client) { return testEnv, k8sClient } +func readCRDFiles(t *testing.T, crdPaths ...string) []*apiextensionsv1.CustomResourceDefinition { + ret := []*apiextensionsv1.CustomResourceDefinition{} + + for _, crdPath := range crdPaths { + crdYamlData, err := os.ReadFile(crdPath) + if err != nil { + t.Fatalf("Failed to read CRD file: %v", err) + } + + dec := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + var crd apiextensionsv1.CustomResourceDefinition + _, _, err = dec.Decode(crdYamlData, nil, &crd) + if err != nil { + t.Fatalf("Failed to decode CRD: %v", err) + } + + ret = append(ret, &crd) + } + + return ret +} + func makeNamespace(name string) *corev1.Namespace { return &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/testdata/crd/prometheusrule-crd.yaml b/operators/endpointmetrics/controllers/observabilityendpoint/testdata/crd/prometheusrule-crd.yaml deleted file mode 100644 index 1dd024388..000000000 --- a/operators/endpointmetrics/controllers/observabilityendpoint/testdata/crd/prometheusrule-crd.yaml +++ /dev/null @@ -1,100 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.6.2 - creationTimestamp: null - name: prometheusrules.monitoring.coreos.com -spec: - group: monitoring.coreos.com - names: - categories: - - prometheus-operator - kind: PrometheusRule - listKind: PrometheusRuleList - plural: prometheusrules - singular: prometheusrule - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: PrometheusRule defines recording and alerting rules for a Prometheus - instance - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of desired alerting rule definitions for Prometheus. - properties: - groups: - description: Content of Prometheus rule file - items: - description: 'RuleGroup is a list of sequentially evaluated recording - and alerting rules. Note: PartialResponseStrategy is only used - by ThanosRuler and will be ignored by Prometheus instances. Valid - values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response' - properties: - interval: - type: string - name: - type: string - partial_response_strategy: - type: string - rules: - items: - description: 'Rule describes an alerting or recording rule - See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) - rule' - properties: - alert: - type: string - annotations: - additionalProperties: - type: string - type: object - expr: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - for: - type: string - labels: - additionalProperties: - type: string - type: object - record: - type: string - required: - - expr - type: object - type: array - required: - - name - - rules - type: object - type: array - type: object - required: - - spec - type: object - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/testdata/crd/servicemonitor-crd.json b/operators/endpointmetrics/controllers/observabilityendpoint/testdata/crd/servicemonitor-crd.json deleted file mode 100644 index f8683e129..000000000 --- a/operators/endpointmetrics/controllers/observabilityendpoint/testdata/crd/servicemonitor-crd.json +++ /dev/null @@ -1,780 +0,0 @@ -{ - "apiVersion": "apiextensions.k8s.io/v1", - "kind": "CustomResourceDefinition", - "metadata": { - "annotations": { - "controller-gen.kubebuilder.io/version": "v0.13.0", - "operator.prometheus.io/version": "0.71.2" - }, - "name": "servicemonitors.monitoring.coreos.com" - }, - "spec": { - "group": "monitoring.coreos.com", - "names": { - "categories": [ - "prometheus-operator" - ], - "kind": "ServiceMonitor", - "listKind": "ServiceMonitorList", - "plural": "servicemonitors", - "shortNames": [ - "smon" - ], - "singular": "servicemonitor" - }, - "scope": "Namespaced", - "versions": [ - { - "name": "v1", - "schema": { - "openAPIV3Schema": { - "description": "ServiceMonitor defines monitoring for a set of services.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "type": "object" - }, - "spec": { - "description": "Specification of desired Service selection for target discovery by Prometheus.", - "properties": { - "attachMetadata": { - "description": "`attachMetadata` defines additional metadata which is added to the discovered targets. \n It requires Prometheus >= v2.37.0.", - "properties": { - "node": { - "description": "When set to true, Prometheus must have the `get` permission on the `Nodes` objects.", - "type": "boolean" - } - }, - "type": "object" - }, - "endpoints": { - "description": "List of endpoints part of this ServiceMonitor.", - "items": { - "description": "Endpoint defines an endpoint serving Prometheus metrics to be scraped by Prometheus.", - "properties": { - "authorization": { - "description": "`authorization` configures the Authorization header credentials to use when scraping the target. \n Cannot be set at the same time as `basicAuth`, or `oauth2`.", - "properties": { - "credentials": { - "description": "Selects a key of a Secret in the namespace that contains the credentials for authentication.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "type": { - "description": "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"", - "type": "string" - } - }, - "type": "object" - }, - "basicAuth": { - "description": "`basicAuth` configures the Basic Authentication credentials to use when scraping the target. \n Cannot be set at the same time as `authorization`, or `oauth2`.", - "properties": { - "password": { - "description": "`password` specifies a key of a Secret containing the password for authentication.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "username": { - "description": "`username` specifies a key of a Secret containing the username for authentication.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - } - }, - "type": "object" - }, - "bearerTokenFile": { - "description": "File to read bearer token for scraping the target. \n Deprecated: use `authorization` instead.", - "type": "string" - }, - "bearerTokenSecret": { - "description": "`bearerTokenSecret` specifies a key of a Secret containing the bearer token for scraping targets. The secret needs to be in the same namespace as the ServiceMonitor object and readable by the Prometheus Operator. \n Deprecated: use `authorization` instead.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "enableHttp2": { - "description": "`enableHttp2` can be used to disable HTTP2 when scraping the target.", - "type": "boolean" - }, - "filterRunning": { - "description": "When true, the pods which are not running (e.g. either in Failed or Succeeded state) are dropped during the target discovery. \n If unset, the filtering is enabled. \n More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase", - "type": "boolean" - }, - "followRedirects": { - "description": "`followRedirects` defines whether the scrape requests should follow HTTP 3xx redirects.", - "type": "boolean" - }, - "honorLabels": { - "description": "When true, `honorLabels` preserves the metric's labels when they collide with the target's labels.", - "type": "boolean" - }, - "honorTimestamps": { - "description": "`honorTimestamps` controls whether Prometheus preserves the timestamps when exposed by the target.", - "type": "boolean" - }, - "interval": { - "description": "Interval at which Prometheus scrapes the metrics from the target. \n If empty, Prometheus uses the global scrape interval.", - "pattern": "^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$", - "type": "string" - }, - "metricRelabelings": { - "description": "`metricRelabelings` configures the relabeling rules to apply to the samples before ingestion.", - "items": { - "description": "RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config", - "properties": { - "action": { - "default": "replace", - "description": "Action to perform based on the regex matching. \n `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
\n Default: \"Replace\"", - "enum": [ - "replace", - "Replace", - "keep", - "Keep", - "drop", - "Drop", - "hashmod", - "HashMod", - "labelmap", - "LabelMap", - "labeldrop", - "LabelDrop", - "labelkeep", - "LabelKeep", - "lowercase", - "Lowercase", - "uppercase", - "Uppercase", - "keepequal", - "KeepEqual", - "dropequal", - "DropEqual" - ], - "type": "string" - }, - "modulus": { - "description": "Modulus to take of the hash of the source label values. \n Only applicable when the action is `HashMod`.", - "format": "int64", - "type": "integer" - }, - "regex": { - "description": "Regular expression against which the extracted value is matched.", - "type": "string" - }, - "replacement": { - "description": "Replacement value against which a Replace action is performed if the regular expression matches. \n Regex capture groups are available.", - "type": "string" - }, - "separator": { - "description": "Separator is the string between concatenated SourceLabels.", - "type": "string" - }, - "sourceLabels": { - "description": "The source labels select values from existing labels. Their content is concatenated using the configured Separator and matched against the configured regular expression.", - "items": { - "description": "LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, as well as underscores.", - "pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$", - "type": "string" - }, - "type": "array" - }, - "targetLabel": { - "description": "Label to which the resulting string is written in a replacement. \n It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. \n Regex capture groups are available.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "oauth2": { - "description": "`oauth2` configures the OAuth2 settings to use when scraping the target. \n It requires Prometheus >= 2.27.0. \n Cannot be set at the same time as `authorization`, or `basicAuth`.", - "properties": { - "clientId": { - "description": "`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client's ID.", - "properties": { - "configMap": { - "description": "ConfigMap containing data to use for the targets.", - "properties": { - "key": { - "description": "The key to select.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the ConfigMap or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "secret": { - "description": "Secret containing data to use for the targets.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - } - }, - "type": "object" - }, - "clientSecret": { - "description": "`clientSecret` specifies a key of a Secret containing the OAuth2 client's secret.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "endpointParams": { - "additionalProperties": { - "type": "string" - }, - "description": "`endpointParams` configures the HTTP parameters to append to the token URL.", - "type": "object" - }, - "scopes": { - "description": "`scopes` defines the OAuth2 scopes used for the token request.", - "items": { - "type": "string" - }, - "type": "array" - }, - "tokenUrl": { - "description": "`tokenURL` configures the URL to fetch the token from.", - "minLength": 1, - "type": "string" - } - }, - "required": [ - "clientId", - "clientSecret", - "tokenUrl" - ], - "type": "object" - }, - "params": { - "additionalProperties": { - "items": { - "type": "string" - }, - "type": "array" - }, - "description": "params define optional HTTP URL parameters.", - "type": "object" - }, - "path": { - "description": "HTTP path from which to scrape for metrics. \n If empty, Prometheus uses the default value (e.g. `/metrics`).", - "type": "string" - }, - "port": { - "description": "Name of the Service port which this endpoint refers to. \n It takes precedence over `targetPort`.", - "type": "string" - }, - "proxyUrl": { - "description": "`proxyURL` configures the HTTP Proxy URL (e.g. \"http://proxyserver:2195\") to go through when scraping the target.", - "type": "string" - }, - "relabelings": { - "description": "`relabelings` configures the relabeling rules to apply the target's metadata labels. \n The Operator automatically adds relabelings for a few standard Kubernetes fields. \n The original scrape job's name is available via the `__tmp_prometheus_job_name` label. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config", - "items": { - "description": "RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config", - "properties": { - "action": { - "default": "replace", - "description": "Action to perform based on the regex matching. \n `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
\n Default: \"Replace\"", - "enum": [ - "replace", - "Replace", - "keep", - "Keep", - "drop", - "Drop", - "hashmod", - "HashMod", - "labelmap", - "LabelMap", - "labeldrop", - "LabelDrop", - "labelkeep", - "LabelKeep", - "lowercase", - "Lowercase", - "uppercase", - "Uppercase", - "keepequal", - "KeepEqual", - "dropequal", - "DropEqual" - ], - "type": "string" - }, - "modulus": { - "description": "Modulus to take of the hash of the source label values. \n Only applicable when the action is `HashMod`.", - "format": "int64", - "type": "integer" - }, - "regex": { - "description": "Regular expression against which the extracted value is matched.", - "type": "string" - }, - "replacement": { - "description": "Replacement value against which a Replace action is performed if the regular expression matches. \n Regex capture groups are available.", - "type": "string" - }, - "separator": { - "description": "Separator is the string between concatenated SourceLabels.", - "type": "string" - }, - "sourceLabels": { - "description": "The source labels select values from existing labels. Their content is concatenated using the configured Separator and matched against the configured regular expression.", - "items": { - "description": "LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, as well as underscores.", - "pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$", - "type": "string" - }, - "type": "array" - }, - "targetLabel": { - "description": "Label to which the resulting string is written in a replacement. \n It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. \n Regex capture groups are available.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "scheme": { - "description": "HTTP scheme to use for scraping. \n `http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. \n If empty, Prometheus uses the default value `http`.", - "enum": [ - "http", - "https" - ], - "type": "string" - }, - "scrapeTimeout": { - "description": "Timeout after which Prometheus considers the scrape to be failed. \n If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value in which the latter is used.", - "pattern": "^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$", - "type": "string" - }, - "targetPort": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "string" - } - ], - "description": "Name or number of the target port of the `Pod` object behind the Service. The port must be specified with the container's port property.", - "x-kubernetes-int-or-string": true - }, - "tlsConfig": { - "description": "TLS configuration to use when scraping the target.", - "properties": { - "ca": { - "description": "Certificate authority used when verifying server certificates.", - "properties": { - "configMap": { - "description": "ConfigMap containing data to use for the targets.", - "properties": { - "key": { - "description": "The key to select.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the ConfigMap or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "secret": { - "description": "Secret containing data to use for the targets.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - } - }, - "type": "object" - }, - "caFile": { - "description": "Path to the CA cert in the Prometheus container to use for the targets.", - "type": "string" - }, - "cert": { - "description": "Client certificate to present when doing client-authentication.", - "properties": { - "configMap": { - "description": "ConfigMap containing data to use for the targets.", - "properties": { - "key": { - "description": "The key to select.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the ConfigMap or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "secret": { - "description": "Secret containing data to use for the targets.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - } - }, - "type": "object" - }, - "certFile": { - "description": "Path to the client cert file in the Prometheus container for the targets.", - "type": "string" - }, - "insecureSkipVerify": { - "description": "Disable target certificate validation.", - "type": "boolean" - }, - "keyFile": { - "description": "Path to the client key file in the Prometheus container for the targets.", - "type": "string" - }, - "keySecret": { - "description": "Secret containing the client key file for the targets.", - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - }, - "required": [ - "key" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "serverName": { - "description": "Used to verify the hostname for the targets.", - "type": "string" - } - }, - "type": "object" - }, - "trackTimestampsStaleness": { - "description": "`trackTimestampsStaleness` defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false. \n It requires Prometheus >= v2.48.0.", - "type": "boolean" - } - }, - "type": "object" - }, - "type": "array" - }, - "jobLabel": { - "description": "`jobLabel` selects the label from the associated Kubernetes `Service` object which will be used as the `job` label for all metrics. \n For example if `jobLabel` is set to `foo` and the Kubernetes `Service` object is labeled with `foo: bar`, then Prometheus adds the `job=\"bar\"` label to all ingested metrics. \n If the value of this field is empty or if the label doesn't exist for the given Service, the `job` label of the metrics defaults to the name of the associated Kubernetes `Service`.", - "type": "string" - }, - "keepDroppedTargets": { - "description": "Per-scrape limit on the number of targets dropped by relabeling that will be kept in memory. 0 means no limit. \n It requires Prometheus >= v2.47.0.", - "format": "int64", - "type": "integer" - }, - "labelLimit": { - "description": "Per-scrape limit on number of labels that will be accepted for a sample. \n It requires Prometheus >= v2.27.0.", - "format": "int64", - "type": "integer" - }, - "labelNameLengthLimit": { - "description": "Per-scrape limit on length of labels name that will be accepted for a sample. \n It requires Prometheus >= v2.27.0.", - "format": "int64", - "type": "integer" - }, - "labelValueLengthLimit": { - "description": "Per-scrape limit on length of labels value that will be accepted for a sample. \n It requires Prometheus >= v2.27.0.", - "format": "int64", - "type": "integer" - }, - "namespaceSelector": { - "description": "Selector to select which namespaces the Kubernetes `Endpoints` objects are discovered from.", - "properties": { - "any": { - "description": "Boolean describing whether all namespaces are selected in contrast to a list restricting them.", - "type": "boolean" - }, - "matchNames": { - "description": "List of namespace names to select from.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "podTargetLabels": { - "description": "`podTargetLabels` defines the labels which are transferred from the associated Kubernetes `Pod` object onto the ingested metrics.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sampleLimit": { - "description": "`sampleLimit` defines a per-scrape limit on the number of scraped samples that will be accepted.", - "format": "int64", - "type": "integer" - }, - "scrapeProtocols": { - "description": "`scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the protocols supported by Prometheus in order of preference (from most to least preferred). \n If unset, Prometheus uses its default value. \n It requires Prometheus >= v2.49.0.", - "items": { - "description": "ScrapeProtocol represents a protocol used by Prometheus for scraping metrics. 
Supported values are: * `OpenMetricsText0.0.1` * `OpenMetricsText1.0.0` * `PrometheusProto` * `PrometheusText0.0.4`", - "enum": [ - "PrometheusProto", - "OpenMetricsText0.0.1", - "OpenMetricsText1.0.0", - "PrometheusText0.0.4" - ], - "type": "string" - }, - "type": "array", - "x-kubernetes-list-type": "set" - }, - "selector": { - "description": "Label selector to select the Kubernetes `Endpoints` objects.", - "properties": { - "matchExpressions": { - "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", - "items": { - "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "properties": { - "key": { - "description": "key is the label key that the selector applies to.", - "type": "string" - }, - "operator": { - "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", - "type": "string" - }, - "values": { - "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "key", - "operator" - ], - "type": "object" - }, - "type": "array" - }, - "matchLabels": { - "additionalProperties": { - "type": "string" - }, - "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "type": "object" - } - }, - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "targetLabels": { - "description": "`targetLabels` defines the labels which are transferred from the associated Kubernetes `Service` object onto the ingested metrics.", - "items": { - "type": "string" - }, - "type": "array" - }, - "targetLimit": { - "description": "`targetLimit` defines a limit on the number of scraped targets that will be accepted.", - "format": "int64", - "type": "integer" - } - }, - "required": [ - "selector" - ], - "type": "object" - } - }, - "required": [ - "spec" - ], - "type": "object" - } - }, - "served": true, - "storage": true - } - ] - } - } diff --git a/operators/endpointmetrics/controllers/status/status_controller_integration_test.go b/operators/endpointmetrics/controllers/status/status_controller_integration_test.go index 66f252583..b456792a4 100644 --- a/operators/endpointmetrics/controllers/status/status_controller_integration_test.go +++ b/operators/endpointmetrics/controllers/status/status_controller_integration_test.go @@ -36,6 +36,7 @@ func TestIntegrationReconcileStatus(t *testing.T) { hubNamespace := "hub-namespace" obsAddonName := "observability-addon" + // Setup spoke cluster testEnv, k8sClient := setupTestEnv(t) defer testEnv.Stop() @@ -48,6 +49,7 @@ func TestIntegrationReconcileStatus(t *testing.T) { t.Fatalf("Failed to create resources: %v", err) } + // Setup hub cluster hubTestEnv, hubK8sClient := setupTestEnv(t) defer hubTestEnv.Stop() @@ -59,8 +61,10 @@ func TestIntegrationReconcileStatus(t *testing.T) { t.Fatalf("Failed to create resources: %v", err) } + // Setup controller manager mgr, err := ctrl.NewManager(testEnv.Config, ctrl.Options{ - Scheme: k8sClient.Scheme(), + Scheme: k8sClient.Scheme(), 
+ MetricsBindAddress: "0", // Avoids port conflict with the default port 8080 }) assert.NoError(t, err) @@ -85,6 +89,9 @@ func TestIntegrationReconcileStatus(t *testing.T) { assert.NoError(t, err) }() + // Test: + // Update on the spoke addon status should trigger an update on the hub addon status. + go func() { // Update spoke addon status concurrently to trigger the reconcile loop. addCondition(spokeObsAddon, "Deployed", metav1.ConditionTrue) @@ -102,7 +109,6 @@ func TestIntegrationReconcileStatus(t *testing.T) { assert.NoError(t, err) }() - // Hub addon status should be updated err = wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) { hubObsAddon := &oav1beta1.ObservabilityAddon{} err := hubK8sClient.Get(context.Background(), types.NamespacedName{Name: obsAddonName, Namespace: hubNamespace}, hubObsAddon) diff --git a/scripts/install-binaries.sh b/scripts/install-binaries.sh index c12472f42..36d15114d 100755 --- a/scripts/install-binaries.sh +++ b/scripts/install-binaries.sh @@ -115,6 +115,12 @@ install_e2e_tests_deps() { install_kustomize ${bin_dir} } +install_envtest_deps() { + go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest + bin_dir=${1:-${BIN_DIR}} + setup-envtest --bin-dir ${bin_dir} -p env use 1.30.x +} + # check if script is called directly, or sourced (return 0 2>/dev/null) && sourced=1 || sourced=0 # This allows functions within this file to be called individually from Makefile(s). From b950a0526cb4da149db359714771ae6e9b5e6696 Mon Sep 17 00:00:00 2001 From: Coleen Iona Quadros Date: Wed, 22 May 2024 17:39:28 +0200 Subject: [PATCH 28/33] update prometheus operator crd (#1449) Signed-off-by: Coleen Iona Quadros --- .../prometheus/crd/kustomization.yaml | 2 +- ...0_68_0.yaml => prometheus_crd_0_73_2.yaml} | 1217 ++++++++++++++--- .../prometheus-operator-deployment.yaml | 2 +- 3 files changed, 1008 insertions(+), 213 deletions(-) rename operators/endpointmetrics/manifests/prometheus/crd/{prometheus_crd_0_68_0.yaml => prometheus_crd_0_73_2.yaml} (89%) diff --git a/operators/endpointmetrics/manifests/prometheus/crd/kustomization.yaml b/operators/endpointmetrics/manifests/prometheus/crd/kustomization.yaml index 9c9ff7280..1e3e5806f 100644 --- a/operators/endpointmetrics/manifests/prometheus/crd/kustomization.yaml +++ b/operators/endpointmetrics/manifests/prometheus/crd/kustomization.yaml @@ -3,7 +3,7 @@ resources: - alertmanagerconfig_crd_0_53_1.yaml - podmonitor_crd_0_53_1.yaml - probe_crd_0_53_1.yaml -- prometheus_crd_0_68_0.yaml +- prometheus_crd_0_73_2.yaml - prometheusrule_crd_0_53_1.yaml - servicemonitor_crd_0_53_1.yaml - thanosruler_crd_0_53_1.yaml diff --git a/operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_68_0.yaml b/operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_73_2.yaml similarity index 89% rename from operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_68_0.yaml rename to operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_73_2.yaml index b912437a1..a41c08e09 100644 --- a/operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_68_0.yaml +++ b/operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_73_2.yaml @@ -3,8 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.73.2 name: prometheuses.monitoring.coreos.com spec: group: 
monitoring.coreos.com @@ -410,7 +410,8 @@ spec: properties: labelSelector: description: A label query over a set of resources, - in this case pods. + in this case pods. If it's null, this PodAffinityTerm + matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label @@ -460,6 +461,44 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key in (value)` to select + the group of existing pods which pods will be + taken into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist in + the incoming pod labels will be ignored. The default + value is empty. The same key is forbidden to exist + in both MatchLabelKeys and LabelSelector. Also, + MatchLabelKeys cannot be set when LabelSelector + isn't set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` to + select the group of existing pods which pods will + be taken into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist in + the incoming pod labels will be ignored. The default + value is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. Also, + MismatchLabelKeys cannot be set when LabelSelector + isn't set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -570,7 +609,8 @@ spec: properties: labelSelector: description: A label query over a set of resources, - in this case pods. + in this case pods. If it's null, this PodAffinityTerm + matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label @@ -616,6 +656,43 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys + to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key in (value)` to select the + group of existing pods which pods will be taken into + consideration for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels will + be ignored. The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector + isn't set. This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. 
+ The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key notin (value)` to select the + group of existing pods which pods will be taken into + consideration for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels will + be ignored. The default value is empty. The same key + is forbidden to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys cannot be set + when LabelSelector isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied to the @@ -719,7 +796,8 @@ spec: properties: labelSelector: description: A label query over a set of resources, - in this case pods. + in this case pods. If it's null, this PodAffinityTerm + matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label @@ -769,6 +847,44 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key in (value)` to select + the group of existing pods which pods will be + taken into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist in + the incoming pod labels will be ignored. The default + value is empty. The same key is forbidden to exist + in both MatchLabelKeys and LabelSelector. Also, + MatchLabelKeys cannot be set when LabelSelector + isn't set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` to + select the group of existing pods which pods will + be taken into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist in + the incoming pod labels will be ignored. The default + value is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. Also, + MismatchLabelKeys cannot be set when LabelSelector + isn't set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied @@ -879,7 +995,8 @@ spec: properties: labelSelector: description: A label query over a set of resources, - in this case pods. + in this case pods. If it's null, this PodAffinityTerm + matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label @@ -925,6 +1042,43 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys + to select which pods will be taken into consideration. 
+ The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key in (value)` to select the + group of existing pods which pods will be taken into + consideration for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels will + be ignored. The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector + isn't set. This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key notin (value)` to select the + group of existing pods which pods will be taken into + consideration for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels will + be ignored. The default value is empty. The same key + is forbidden to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys cannot be set + when LabelSelector isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. The term is applied to the @@ -1019,7 +1173,8 @@ spec: type: string authorization: description: "Authorization section for Alertmanager. \n - Cannot be set at the same time as `basicAuth`, or `bearerTokenFile`." + Cannot be set at the same time as `basicAuth`, `bearerTokenFile` + or `sigv4`." properties: credentials: description: Selects a key of a Secret in the namespace @@ -1051,11 +1206,11 @@ spec: basicAuth: description: "BasicAuth configuration for Alertmanager. \n Cannot be set at the same time as `bearerTokenFile`, - or `authorization`." + `authorization` or `sigv4`." properties: password: - description: The secret in the service monitor namespace - that contains the password for authentication. + description: '`password` specifies a key of a Secret + containing the password for authentication.' properties: key: description: The key of the secret to select from. Must @@ -1075,8 +1230,8 @@ spec: type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace - that contains the username for authentication. + description: '`username` specifies a key of a Secret + containing the username for authentication.' properties: key: description: The key of the secret to select from. Must @@ -1098,9 +1253,9 @@ spec: type: object bearerTokenFile: description: "File to read bearer token for Alertmanager. - \n Cannot be set at the same time as `basicAuth`, or `authorization`. - \n *Deprecated: this will be removed in a future release. - Prefer using `authorization`.*" + \n Cannot be set at the same time as `basicAuth`, `authorization`, + or `sigv4`. \n Deprecated: this will be removed in a future + release. Prefer using `authorization`." type: string enableHttp2: description: Whether to enable HTTP2. @@ -1124,6 +1279,68 @@ spec: scheme: description: Scheme to use when firing alerts. 
type: string + sigv4: + description: "Sigv4 allows to configures AWS's Signature + Verification 4 for the URL. \n It requires Prometheus + >= v2.48.0. \n Cannot be set at the same time as `basicAuth`, + `bearerTokenFile` or `authorization`." + properties: + accessKey: + description: AccessKey is the AWS API key. If not specified, + the environment variable `AWS_ACCESS_KEY_ID` is used. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + profile: + description: Profile is the named AWS profile used to + authenticate. + type: string + region: + description: Region is the AWS region. If blank, the + region from the default credentials chain used. + type: string + roleArn: + description: RoleArn is the named AWS profile used to + authenticate. + type: string + secretKey: + description: SecretKey is the AWS API secret. If not + specified, the environment variable `AWS_SECRET_ACCESS_KEY` + is used. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object timeout: description: Timeout is a per-target Alertmanager timeout when pushing alerts. @@ -1279,9 +1496,9 @@ spec: type: object allowOverlappingBlocks: description: "AllowOverlappingBlocks enables vertical compaction and - vertical query merge in Prometheus. \n *Deprecated: this flag has + vertical query merge in Prometheus. \n Deprecated: this flag has no effect for Prometheus >= 2.39.0 where overlapping blocks are - enabled by default.*" + enabled by default." type: boolean apiserverConfig: description: 'APIServerConfig allows specifying a host and auth methods @@ -1329,8 +1546,8 @@ spec: `bearerTokenFile`." properties: password: - description: The secret in the service monitor namespace that - contains the password for authentication. + description: '`password` specifies a key of a Secret containing + the password for authentication.' properties: key: description: The key of the secret to select from. Must @@ -1349,8 +1566,8 @@ spec: type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace that - contains the username for authentication. + description: '`username` specifies a key of a Secret containing + the username for authentication.' properties: key: description: The key of the secret to select from. Must @@ -1372,13 +1589,13 @@ spec: bearerToken: description: "*Warning: this field shouldn't be used because the token value appears in clear-text. Prefer using `authorization`.* - \n *Deprecated: this will be removed in a future release.*" + \n Deprecated: this will be removed in a future release." type: string bearerTokenFile: description: "File to read bearer token for accessing apiserver. 
\n Cannot be set at the same time as `basicAuth`, `authorization`, - or `bearerToken`. \n *Deprecated: this will be removed in a - future release. Prefer using `authorization`.*" + or `bearerToken`. \n Deprecated: this will be removed in a future + release. Prefer using `authorization`." type: string host: description: Kubernetes API address consisting of a hostname or @@ -1532,7 +1749,7 @@ spec: type: boolean type: object baseImage: - description: '*Deprecated: use ''spec.image'' instead.*' + description: 'Deprecated: use ''spec.image'' instead.' type: string bodySizeLimit: description: BodySizeLimit defines per-scrape on response body size. @@ -1840,6 +2057,18 @@ spec: required: - port type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. @@ -1938,6 +2167,18 @@ spec: required: - port type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. @@ -2931,9 +3172,9 @@ spec: limit on the number of scraped samples that will be accepted. This overrides any `spec.sampleLimit` set by ServiceMonitor, PodMonitor, Probe objects unless `spec.sampleLimit` is greater than zero and - less than than `spec.enforcedSampleLimit`. \n It is meant to be - used by admins to keep the overall number of samples/series under - a desired limit." + less than `spec.enforcedSampleLimit`. \n It is meant to be used + by admins to keep the overall number of samples/series under a desired + limit." format: int64 type: integer enforcedTargetLimit: @@ -3049,7 +3290,7 @@ spec: description: When true, `spec.namespaceSelector` from all PodMonitor, ServiceMonitor and Probe objects will be ignored. They will only discover targets within the namespace of the PodMonitor, ServiceMonitor - and Probe objec. + and Probe object. type: boolean image: description: "Container image name for Prometheus. If specified, it @@ -3377,6 +3618,18 @@ spec: required: - port type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. @@ -3475,6 +3728,18 @@ spec: required: - port type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. 
@@ -4423,6 +4688,15 @@ spec: - warn - error type: string + maximumStartupDurationSeconds: + description: Defines the maximum time that the `prometheus` container's + startup probe will wait before being considered failed. The startup + probe will return success after the WAL replay is complete. If set, + the value should be greater than 60 (seconds). Otherwise it will + be equal to 600 seconds (15 minutes). + format: int32 + minimum: 60 + type: integer minReadySeconds: description: "Minimum number of seconds for which a newly created Pod should be ready without any of its container crashing for it @@ -4452,9 +4726,40 @@ spec: description: When a Prometheus deployment is paused, no actions except for deletion will be performed on the underlying objects. type: boolean + persistentVolumeClaimRetentionPolicy: + description: The field controls if and how PVCs are deleted during + the lifecycle of a StatefulSet. The default behavior is all PVCs + are retained. This is an alpha field from kubernetes 1.23 until + 1.26 and a beta field from 1.26. It requires enabling the StatefulSetAutoDeletePVC + feature gate. + properties: + whenDeleted: + description: WhenDeleted specifies what happens to PVCs created + from StatefulSet VolumeClaimTemplates when the StatefulSet is + deleted. The default policy of `Retain` causes PVCs to not be + affected by StatefulSet deletion. The `Delete` policy causes + those PVCs to be deleted. + type: string + whenScaled: + description: WhenScaled specifies what happens to PVCs created + from StatefulSet VolumeClaimTemplates when the StatefulSet is + scaled down. The default policy of `Retain` causes PVCs to not + be affected by a scaledown. The `Delete` policy causes the associated + PVCs for any excess pods above the replica count to be deleted. + type: string + type: object podMetadata: - description: PodMetadata configures labels and annotations which are - propagated to the Prometheus pods. + description: "PodMetadata configures labels and annotations which + are propagated to the Prometheus pods. \n The following items are + reserved and cannot be overridden: * \"prometheus\" label, set to + the name of the Prometheus object. * \"app.kubernetes.io/instance\" + label, set to the name of the Prometheus object. * \"app.kubernetes.io/managed-by\" + label, set to \"prometheus-operator\". * \"app.kubernetes.io/name\" + label, set to \"prometheus\". * \"app.kubernetes.io/version\" label, + set to the Prometheus version. * \"operator.prometheus.io/name\" + label, set to the name of the Prometheus object. * \"operator.prometheus.io/shard\" + label, set to the shard number of the Prometheus object. * \"kubectl.kubernetes.io/default-container\" + annotation, set to \"prometheus\"." properties: annotations: additionalProperties: @@ -4528,17 +4833,17 @@ spec: type: object x-kubernetes-map-type: atomic podMonitorSelector: - description: "*Experimental* PodMonitors to be selected for target - discovery. An empty label selector matches all objects. A null label - selector matches no objects. \n If `spec.serviceMonitorSelector`, - `spec.podMonitorSelector`, `spec.probeSelector` and `spec.scrapeConfigSelector` - are null, the Prometheus configuration is unmanaged. The Prometheus - operator will ensure that the Prometheus configuration's Secret - exists, but it is the responsibility of the user to provide the - raw gzipped Prometheus configuration under the `prometheus.yaml.gz` - key. 
This behavior is *deprecated* and will be removed in the next - major version of the custom resource definition. It is recommended - to use `spec.additionalScrapeConfigs` instead." + description: "PodMonitors to be selected for target discovery. An + empty label selector matches all objects. A null label selector + matches no objects. \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, + `spec.probeSelector` and `spec.scrapeConfigSelector` are null, the + Prometheus configuration is unmanaged. The Prometheus operator will + ensure that the Prometheus configuration's Secret exists, but it + is the responsibility of the user to provide the raw gzipped Prometheus + configuration under the `prometheus.yaml.gz` key. This behavior + is *deprecated* and will be removed in the next major version of + the custom resource definition. It is recommended to use `spec.additionalScrapeConfigs` + instead." properties: matchExpressions: description: matchExpressions is a list of label selector requirements. @@ -4597,9 +4902,9 @@ spec: description: Priority class assigned to the Pods. type: string probeNamespaceSelector: - description: '*Experimental* Namespaces to match for Probe discovery. - An empty label selector matches all namespaces. A null label selector - matches the current namespace only.' + description: Namespaces to match for Probe discovery. An empty label + selector matches all namespaces. A null label selector matches the + current namespace only. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. @@ -4644,9 +4949,9 @@ spec: type: object x-kubernetes-map-type: atomic probeSelector: - description: "*Experimental* Probes to be selected for target discovery. - An empty label selector matches all objects. A null label selector - matches no objects. \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, + description: "Probes to be selected for target discovery. An empty + label selector matches all objects. A null label selector matches + no objects. \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. The Prometheus operator will ensure that the Prometheus configuration's Secret exists, but it @@ -4707,8 +5012,8 @@ spec: prometheusRulesExcludedFromEnforce: description: 'Defines the list of PrometheusRule objects to which the namespace label enforcement doesn''t apply. This is only relevant - when `spec.enforcedNamespaceLabel` is set to true. *Deprecated: - use `spec.excludedFromEnforcement` instead.*' + when `spec.enforcedNamespaceLabel` is set to true. Deprecated: use + `spec.excludedFromEnforcement` instead.' items: description: PrometheusRuleExcludeConfig enables users to configure excluded PrometheusRule names and their namespaces to be ignored @@ -4761,6 +5066,14 @@ spec: can be set to a standard I/O stream, e.g. `/dev/stdout`, to log query information to the default Prometheus log stream." type: string + reloadStrategy: + description: Defines the strategy used to reload the Prometheus configuration. + If not specified, the configuration is reloaded using the /-/reload + HTTP endpoint. + enum: + - HTTP + - ProcessSignal + type: string remoteRead: description: Defines the list of remote read configurations. items: @@ -4807,8 +5120,8 @@ spec: be set at the same time as `authorization`, or `oauth2`." 
properties: password: - description: The secret in the service monitor namespace - that contains the password for authentication. + description: '`password` specifies a key of a Secret containing + the password for authentication.' properties: key: description: The key of the secret to select from. Must @@ -4827,8 +5140,8 @@ spec: type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace - that contains the username for authentication. + description: '`username` specifies a key of a Secret containing + the username for authentication.' properties: key: description: The key of the secret to select from. Must @@ -4850,12 +5163,12 @@ spec: bearerToken: description: "*Warning: this field shouldn't be used because the token value appears in clear-text. Prefer using `authorization`.* - \n *Deprecated: this will be removed in a future release.*" + \n Deprecated: this will be removed in a future release." type: string bearerTokenFile: description: "File from which to read the bearer token for the - URL. \n *Deprecated: this will be removed in a future release. - Prefer using `authorization`.*" + URL. \n Deprecated: this will be removed in a future release. + Prefer using `authorization`." type: string filterExternalLabels: description: "Whether to use the external labels as selectors @@ -4886,8 +5199,8 @@ spec: `authorization`, or `basicAuth`." properties: clientId: - description: The secret or configmap containing the OAuth2 - client id + description: '`clientId` specifies a key of a Secret or + ConfigMap containing the OAuth2 client''s ID.' properties: configMap: description: ConfigMap containing data to use for the @@ -4931,7 +5244,8 @@ spec: x-kubernetes-map-type: atomic type: object clientSecret: - description: The secret containing the OAuth2 client secret + description: '`clientSecret` specifies a key of a Secret + containing the OAuth2 client''s secret.' properties: key: description: The key of the secret to select from. Must @@ -4952,15 +5266,18 @@ spec: endpointParams: additionalProperties: type: string - description: Parameters to append to the token URL + description: '`endpointParams` configures the HTTP parameters + to append to the token URL.' type: object scopes: - description: OAuth2 scopes used for the token request + description: '`scopes` defines the OAuth2 scopes used for + the token request.' items: type: string type: array tokenUrl: - description: The URL to fetch the token from + description: '`tokenURL` configures the URL to fetch the + token from.' minLength: 1 type: string required: @@ -5132,7 +5449,7 @@ spec: authorization: description: "Authorization section for the URL. \n It requires Prometheus >= v2.26.0. \n Cannot be set at the same time as - `sigv4`, `basicAuth`, or `oauth2`." + `sigv4`, `basicAuth`, `oauth2`, or `azureAd`." properties: credentials: description: Selects a key of a Secret in the namespace @@ -5164,13 +5481,83 @@ spec: \n Default: \"Bearer\"" type: string type: object + azureAd: + description: "AzureAD for the URL. \n It requires Prometheus + >= v2.45.0. \n Cannot be set at the same time as `authorization`, + `basicAuth`, `oauth2`, or `sigv4`." + properties: + cloud: + description: The Azure Cloud. Options are 'AzurePublic', + 'AzureChina', or 'AzureGovernment'. + enum: + - AzureChina + - AzureGovernment + - AzurePublic + type: string + managedIdentity: + description: ManagedIdentity defines the Azure User-assigned + Managed identity. Cannot be set at the same time as `oauth`. 
+ properties: + clientId: + description: The client id + type: string + required: + - clientId + type: object + oauth: + description: "OAuth defines the oauth config that is being + used to authenticate. Cannot be set at the same time as + `managedIdentity`. \n It requires Prometheus >= v2.48.0." + properties: + clientId: + description: '`clientID` is the clientId of the Azure + Active Directory application that is being used to + authenticate.' + minLength: 1 + type: string + clientSecret: + description: '`clientSecret` specifies a key of a Secret + containing the client secret of the Azure Active Directory + application that is being used to authenticate.' + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + tenantId: + description: '`tenantID` is the tenant ID of the Azure + Active Directory application that is being used to + authenticate.' + minLength: 1 + pattern: ^[0-9a-zA-Z-.]+$ + type: string + required: + - clientId + - clientSecret + - tenantId + type: object + type: object basicAuth: description: "BasicAuth configuration for the URL. \n Cannot - be set at the same time as `sigv4`, `authorization`, or `oauth2`." + be set at the same time as `sigv4`, `authorization`, `oauth2`, + or `azureAd`." properties: password: - description: The secret in the service monitor namespace - that contains the password for authentication. + description: '`password` specifies a key of a Secret containing + the password for authentication.' properties: key: description: The key of the secret to select from. Must @@ -5189,8 +5576,8 @@ spec: type: object x-kubernetes-map-type: atomic username: - description: The secret in the service monitor namespace - that contains the username for authentication. + description: '`username` specifies a key of a Secret containing + the username for authentication.' properties: key: description: The key of the secret to select from. Must @@ -5212,13 +5599,16 @@ spec: bearerToken: description: "*Warning: this field shouldn't be used because the token value appears in clear-text. Prefer using `authorization`.* - \n *Deprecated: this will be removed in a future release.*" + \n Deprecated: this will be removed in a future release." type: string bearerTokenFile: description: "File from which to read bearer token for the URL. - \n *Deprecated: this will be removed in a future release. - Prefer using `authorization`.*" + \n Deprecated: this will be removed in a future release. Prefer + using `authorization`." type: string + enableHTTP2: + description: Whether to enable HTTP2. + type: boolean headers: additionalProperties: type: string @@ -5250,11 +5640,11 @@ spec: oauth2: description: "OAuth2 configuration for the URL. \n It requires Prometheus >= v2.27.0. \n Cannot be set at the same time as - `sigv4`, `authorization`, or `basicAuth`." + `sigv4`, `authorization`, `basicAuth`, or `azureAd`." properties: clientId: - description: The secret or configmap containing the OAuth2 - client id + description: '`clientId` specifies a key of a Secret or + ConfigMap containing the OAuth2 client''s ID.' 
properties: configMap: description: ConfigMap containing data to use for the @@ -5298,7 +5688,8 @@ spec: x-kubernetes-map-type: atomic type: object clientSecret: - description: The secret containing the OAuth2 client secret + description: '`clientSecret` specifies a key of a Secret + containing the OAuth2 client''s secret.' properties: key: description: The key of the secret to select from. Must @@ -5319,15 +5710,18 @@ spec: endpointParams: additionalProperties: type: string - description: Parameters to append to the token URL + description: '`endpointParams` configures the HTTP parameters + to append to the token URL.' type: object scopes: - description: OAuth2 scopes used for the token request + description: '`scopes` defines the OAuth2 scopes used for + the token request.' items: type: string type: array tokenUrl: - description: The URL to fetch the token from + description: '`tokenURL` configures the URL to fetch the + token from.' minLength: 1 type: string required: @@ -5345,6 +5739,7 @@ spec: batchSendDeadline: description: BatchSendDeadline is the maximum time a sample will wait in buffer. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string capacity: description: Capacity is the number of samples to buffer @@ -5352,6 +5747,7 @@ spec: type: integer maxBackoff: description: MaxBackoff is the maximum retry delay. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string maxRetries: description: MaxRetries is the maximum number of times to @@ -5368,16 +5764,23 @@ spec: minBackoff: description: MinBackoff is the initial retry delay. Gets doubled for every retry. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string minShards: description: MinShards is the minimum number of shards, i.e. amount of concurrency. type: integer retryOnRateLimit: - description: Retry upon receiving a 429 status code from - the remote-write storage. This is experimental feature - and might change in the future. + description: "Retry upon receiving a 429 status code from + the remote-write storage. \n This is an *experimental + feature*, it may change in any upcoming release in a breaking + way." type: boolean + sampleAgeLimit: + description: SampleAgeLimit drops samples older than the + limit. It requires Prometheus >= v2.50.0. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string type: object remoteTimeout: description: Timeout for requests to the remote write endpoint. @@ -5397,8 +5800,8 @@ spec: sigv4: description: "Sigv4 allows to configures AWS's Signature Verification 4 for the URL. \n It requires Prometheus >= v2.26.0. \n Cannot - be set at the same time as `authorization`, `basicAuth`, or - `oauth2`." + be set at the same time as `authorization`, `basicAuth`, `oauth2`, + or `azureAd`." properties: accessKey: description: AccessKey is the AWS API key. If not specified, @@ -5864,10 +6267,247 @@ spec: 2.45.0 and newer. format: int64 type: integer + scrapeClasses: + description: "List of scrape classes to expose to scraping objects + such as PodMonitors, ServiceMonitors, Probes and ScrapeConfigs. + \n This is an *experimental feature*, it may change in any upcoming + release in a breaking way." + items: + properties: + default: + description: "Default indicates that the scrape applies to all + scrape objects that don't configure an explicit scrape class + name. 
\n Only one scrape class can be set as default." + type: boolean + name: + description: Name of the scrape class. + minLength: 1 + type: string + relabelings: + description: "Relabelings configures the relabeling rules to + apply to all scrape targets. \n The Operator automatically + adds relabelings for a few standard Kubernetes fields like + `__meta_kubernetes_namespace` and `__meta_kubernetes_service_name`. + Then the Operator adds the scrape class relabelings defined + here. Then the Operator adds the target-specific relabelings + defined in the scrape object. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + items: + description: "RelabelConfig allows dynamic rewriting of the + label set for targets, alerts, scraped samples and remote + write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. + \n `Uppercase` and `Lowercase` actions require Prometheus + >= v2.36.0. `DropEqual` and `KeepEqual` actions require + Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source + label values. \n Only applicable when the action is + `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: "Replacement value against which a Replace + action is performed if the regular expression matches. + \n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + Separator and matched against the configured regular + expression. + items: + description: LabelName is a valid Prometheus label name + which may only contain ASCII letters, numbers, as + well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written + in a replacement. \n It is mandatory for `Replace`, + `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and + `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + tlsConfig: + description: TLSConfig section for scrapes. + properties: + ca: + description: Certificate authority used when verifying server + certificates. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. 
+ properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map scrapeConfigNamespaceSelector: - description: Namespaces to match for ScrapeConfig discovery. An empty - label selector matches all namespaces. A null label selector matches - the current current namespace only. + description: "Namespaces to match for ScrapeConfig discovery. An empty + label selector matches all namespaces. A null label selector matches + the current namespace only. \n Note that the ScrapeConfig custom + resource definition is currently at Alpha level." properties: matchExpressions: description: matchExpressions is a list of label selector requirements. 
@@ -5912,17 +6552,18 @@ spec: type: object x-kubernetes-map-type: atomic scrapeConfigSelector: - description: "*Experimental* ScrapeConfigs to be selected for target - discovery. An empty label selector matches all objects. A null label - selector matches no objects. \n If `spec.serviceMonitorSelector`, - `spec.podMonitorSelector`, `spec.probeSelector` and `spec.scrapeConfigSelector` - are null, the Prometheus configuration is unmanaged. The Prometheus - operator will ensure that the Prometheus configuration's Secret - exists, but it is the responsibility of the user to provide the - raw gzipped Prometheus configuration under the `prometheus.yaml.gz` - key. This behavior is *deprecated* and will be removed in the next - major version of the custom resource definition. It is recommended - to use `spec.additionalScrapeConfigs` instead." + description: "ScrapeConfigs to be selected for target discovery. An + empty label selector matches all objects. A null label selector + matches no objects. \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, + `spec.probeSelector` and `spec.scrapeConfigSelector` are null, the + Prometheus configuration is unmanaged. The Prometheus operator will + ensure that the Prometheus configuration's Secret exists, but it + is the responsibility of the user to provide the raw gzipped Prometheus + configuration under the `prometheus.yaml.gz` key. This behavior + is *deprecated* and will be removed in the next major version of + the custom resource definition. It is recommended to use `spec.additionalScrapeConfigs` + instead. \n Note that the ScrapeConfig custom resource definition + is currently at Alpha level." properties: matchExpressions: description: matchExpressions is a list of label selector requirements. @@ -5971,6 +6612,23 @@ spec: description: "Interval between consecutive scrapes. \n Default: \"30s\"" pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string + scrapeProtocols: + description: "The protocols to negotiate during a scrape. It tells + clients the protocols supported by Prometheus in order of preference + (from most to least preferred). \n If unset, Prometheus uses its + default value. \n It requires Prometheus >= v2.49.0." + items: + description: 'ScrapeProtocol represents a protocol used by Prometheus + for scraping metrics. Supported values are: * `OpenMetricsText0.0.1` + * `OpenMetricsText1.0.0` * `PrometheusProto` * `PrometheusText0.0.4`' + enum: + - PrometheusProto + - OpenMetricsText0.0.1 + - OpenMetricsText1.0.0 + - PrometheusText0.0.4 + type: string + type: array + x-kubernetes-list-type: set scrapeTimeout: description: Number of seconds to wait until a scrape request times out. @@ -6259,17 +6917,17 @@ spec: type: object x-kubernetes-map-type: atomic sha: - description: '*Deprecated: use ''spec.image'' instead. The image''s - digest can be specified as part of the image name.*' + description: 'Deprecated: use ''spec.image'' instead. The image''s + digest can be specified as part of the image name.' type: string shards: - description: "EXPERIMENTAL: Number of shards to distribute targets - onto. `spec.replicas` multiplied by `spec.shards` is the total number - of Pods created. \n Note that scaling down shards will not reshard - data onto remaining instances, it must be manually moved. Increasing - shards will not reshard data either but it will continue to be available - from the same instances. 
To query globally, use Thanos sidecar and - Thanos querier or remote write data to a central location. \n Sharding + description: "Number of shards to distribute targets onto. `spec.replicas` + multiplied by `spec.shards` is the total number of Pods created. + \n Note that scaling down shards will not reshard data onto remaining + instances, it must be manually moved. Increasing shards will not + reshard data either but it will continue to be available from the + same instances. To query globally, use Thanos sidecar and Thanos + querier or remote write data to a central location. \n Sharding is performed on the content of the `__address__` target meta-label for PodMonitors and ServiceMonitors and `__param_target__` for Probes. \n Default: 1" @@ -6279,8 +6937,8 @@ spec: description: Storage defines the storage used by Prometheus. properties: disableMountSubPath: - description: '*Deprecated: subPath usage will be removed in a - future release.*' + description: 'Deprecated: subPath usage will be removed in a future + release.' type: boolean emptyDir: description: 'EmptyDirVolumeSource to be used by the StatefulSet. @@ -6453,30 +7111,6 @@ spec: value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: - claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used - by this container. \n This is an alpha field - and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It - can only be set for containers." - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of - one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes - that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6555,6 +7189,27 @@ spec: description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used + to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update + the volume with the attributes defined in the corresponding + VolumeAttributesClass. This has a different purpose + than storageClassName, it can be changed after the + claim is created. An empty string value means that + no VolumeAttributesClass will be applied to the + claim but it''s not allowed to reset this field + to empty string once it is set. If unspecified and + the PersistentVolumeClaim is unbound, the default + VolumeAttributesClass will be set by the persistentvolume + controller if it exists. If the resource referred + to by volumeAttributesClass does not exist, this + PersistentVolumeClaim will be set to a Pending state, + as reflected by the modifyVolumeStatus field, until + such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string volumeMode: description: volumeMode defines what type of volume is required by the claim. 
Value of Filesystem is @@ -6721,30 +7376,6 @@ spec: must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: - claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used by - this container. \n This is an alpha field and requires - enabling the DynamicResourceAllocation feature gate. - \n This field is immutable. It can only be set for - containers." - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one - entry in pod.spec.resourceClaims of the Pod - where this field is used. It makes that resource - available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6821,6 +7452,26 @@ spec: description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used to + set the VolumeAttributesClass used by this claim. If + specified, the CSI driver will create or update the + volume with the attributes defined in the corresponding + VolumeAttributesClass. This has a different purpose + than storageClassName, it can be changed after the claim + is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it''s not allowed to + reset this field to empty string once it is set. If + unspecified and the PersistentVolumeClaim is unbound, + the default VolumeAttributesClass will be set by the + persistentvolume controller if it exists. If the resource + referred to by volumeAttributesClass does not exist, + this PersistentVolumeClaim will be set to a Pending + state, as reflected by the modifyVolumeStatus field, + until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string volumeMode: description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied @@ -6832,7 +7483,7 @@ spec: type: string type: object status: - description: '*Deprecated: this field is never set.*' + description: 'Deprecated: this field is never set.' properties: accessModes: description: 'accessModes contains the actual access modes @@ -6968,6 +7619,42 @@ spec: - type type: object type: array + currentVolumeAttributesClassName: + description: currentVolumeAttributesClassName is the current + name of the VolumeAttributesClass the PVC is using. + When unset, there is no VolumeAttributeClass applied + to this PersistentVolumeClaim This is an alpha field + and requires enabling VolumeAttributesClass feature. + type: string + modifyVolumeStatus: + description: ModifyVolumeStatus represents the status + object of ControllerModifyVolume operation. When this + is unset, there is no ModifyVolume operation being attempted. + This is an alpha field and requires enabling VolumeAttributesClass + feature. + properties: + status: + description: 'status is the status of the ControllerModifyVolume + operation. 
It can be in any of following states: + - Pending Pending indicates that the PersistentVolumeClaim + cannot be modified due to unmet requirements, such + as the specified VolumeAttributesClass not existing. + - InProgress InProgress indicates that the volume + is being modified. - Infeasible Infeasible indicates + that the request has been rejected as invalid by + the CSI driver. To resolve the error, a valid VolumeAttributesClass + needs to be specified. Note: New statuses can be + added in the future. Consumers should check for + unknown statuses and fail appropriately.' + type: string + targetVolumeAttributesClassName: + description: targetVolumeAttributesClassName is the + name of the VolumeAttributesClass the PVC currently + being reconciled + type: string + required: + - status + type: object phase: description: phase represents the current phase of PersistentVolumeClaim. type: string @@ -6975,8 +7662,8 @@ spec: type: object type: object tag: - description: '*Deprecated: use ''spec.image'' instead. The image''s - tag can be specified as part of the image name.*' + description: 'Deprecated: use ''spec.image'' instead. The image''s + tag can be specified as part of the image name.' type: string targetLimit: description: TargetLimit defines a limit on the number of scraped @@ -6985,9 +7672,7 @@ spec: format: int64 type: integer thanos: - description: "Defines the configuration of the optional Thanos sidecar. - \n This section is experimental, it may change significantly without - deprecation notice in any release." + description: Defines the configuration of the optional Thanos sidecar. properties: additionalArgs: description: AdditionalArgs allows setting additional arguments @@ -7013,7 +7698,7 @@ spec: type: object type: array baseImage: - description: '*Deprecated: use ''image'' instead.*' + description: 'Deprecated: use ''image'' instead.' type: string blockSize: default: 2h @@ -7190,8 +7875,8 @@ spec: when the operator was released." type: string listenLocal: - description: '*Deprecated: use `grpcListenLocal` and `httpListenLocal` - instead.*' + description: 'Deprecated: use `grpcListenLocal` and `httpListenLocal` + instead.' type: boolean logFormat: description: Log format for the Thanos sidecar. @@ -7298,19 +7983,19 @@ spec: type: object type: object sha: - description: '*Deprecated: use ''image'' instead. The image digest - can be specified as part of the image name.*' + description: 'Deprecated: use ''image'' instead. The image digest + can be specified as part of the image name.' type: string tag: - description: '*Deprecated: use ''image'' instead. The image''s - tag can be specified as part of the image name.*' + description: 'Deprecated: use ''image'' instead. The image''s + tag can be specified as as part of the image name.' type: string tracingConfig: description: "Defines the tracing configuration for the Thanos - sidecar. \n More info: https://thanos.io/tip/thanos/tracing.md/ - \n This is an experimental feature, it may change in any upcoming - release in a breaking way. \n tracingConfigFile takes precedence - over this field." + sidecar. \n `tracingConfigFile` takes precedence over this field. + \n More info: https://thanos.io/tip/thanos/tracing.md/ \n This + is an *experimental feature*, it may change in any upcoming + release in a breaking way." properties: key: description: The key of the secret to select from. 
Must be @@ -7330,10 +8015,10 @@ spec: x-kubernetes-map-type: atomic tracingConfigFile: description: "Defines the tracing configuration file for the Thanos - sidecar. \n More info: https://thanos.io/tip/thanos/tracing.md/ - \n This is an experimental feature, it may change in any upcoming - release in a breaking way. \n This field takes precedence over - tracingConfig." + sidecar. \n This field takes precedence over `tracingConfig`. + \n More info: https://thanos.io/tip/thanos/tracing.md/ \n This + is an *experimental feature*, it may change in any upcoming + release in a breaking way." type: string version: description: "Version of Thanos being deployed. The operator uses @@ -7428,9 +8113,14 @@ spec: topologySpreadConstraints: description: Defines the pod's topology spread constraints if specified. items: - description: TopologySpreadConstraint specifies how to spread matching - pods among the given topology. properties: + additionalLabelSelectors: + description: Defines what Prometheus Operator managed labels + should be added to labelSelector on the topologySpreadConstraint. + enum: + - OnResource + - OnShard + type: string labelSelector: description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the @@ -7598,9 +8288,9 @@ spec: type: object type: array tracingConfig: - description: 'EXPERIMENTAL: TracingConfig configures tracing in Prometheus. - This is an experimental feature, it may change in any upcoming release - in a breaking way.' + description: "TracingConfig configures tracing in Prometheus. \n This + is an *experimental feature*, it may change in any upcoming release + in a breaking way." properties: clientType: description: Client used to export the traces. Supported values @@ -7784,9 +8474,9 @@ spec: description: "Configures how old an out-of-order/out-of-bounds sample can be with respect to the TSDB max time. \n An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp of - the sample is >= (TSDB.MaxTime - outOfOrderTimeWindow). \n Out - of order ingestion is an experimental feature. \n It requires - Prometheus >= v2.39.0." + the sample is >= (TSDB.MaxTime - outOfOrderTimeWindow). \n This + is an *experimental feature*, it may change in any upcoming + release in a breaking way. \n It requires Prometheus >= v2.39.0." pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ type: string type: object @@ -8396,31 +9086,6 @@ spec: value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: - claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used - by this container. \n This is an alpha field - and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. - It can only be set for containers." - items: - description: ResourceClaim references one - entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name - of one entry in pod.spec.resourceClaims - of the Pod where this field is used. - It makes that resource available inside - a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -8503,6 +9168,28 @@ spec: StorageClass required by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used + to set the VolumeAttributesClass used by this + claim. If specified, the CSI driver will create + or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This + has a different purpose than storageClassName, + it can be changed after the claim is created. + An empty string value means that no VolumeAttributesClass + will be applied to the claim but it''s not allowed + to reset this field to empty string once it is + set. If unspecified and the PersistentVolumeClaim + is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller + if it exists. If the resource referred to by volumeAttributesClass + does not exist, this PersistentVolumeClaim will + be set to a Pending state, as reflected by the + modifyVolumeStatus field, until such as a resource + exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string volumeMode: description: volumeMode defines what type of volume is required by the claim. Value of Filesystem @@ -8877,6 +9564,102 @@ spec: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: "ClusterTrustBundle allows a pod to access + the `.spec.trustBundle` field of ClusterTrustBundle + objects in an auto-updating file. \n Alpha, gated + by the ClusterTrustBundleProjection feature gate. + \n ClusterTrustBundle objects can either be selected + by name, or by the combination of signer name and + a label selector. \n Kubelet performs aggressive + normalization of the PEM contents written into the + pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates + are deduplicated. The ordering of certificates within + the file is arbitrary, and Kubelet may change the + order over time." + properties: + labelSelector: + description: Select all ClusterTrustBundles that + match this label selector. Only has effect + if signerName is set. Mutually-exclusive with + name. If unset, interpreted as "match nothing". If + set but empty, interpreted as "match everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: Select a single ClusterTrustBundle + by object name. Mutually-exclusive with signerName + and labelSelector. + type: string + optional: + description: If true, don't block pod startup + if the referenced ClusterTrustBundle(s) aren't + available. If using name, then the named ClusterTrustBundle + is allowed not to exist. If using signerName, + then the combination of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles that + match this signer name. Mutually-exclusive with + name. The contents of all selected ClusterTrustBundles + will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project @@ -9676,6 +10459,10 @@ spec: Prometheus deployment (their labels match the selector). format: int32 type: integer + selector: + description: The selector used to match the pods targeted by this + Prometheus resource. + type: string shardStatuses: description: The list has one entry per shard. Each entry provides a summary of the shard status. @@ -9714,6 +10501,10 @@ spec: x-kubernetes-list-map-keys: - shardID x-kubernetes-list-type: map + shards: + description: Shards is the most recently observed number of shards. + format: int32 + type: integer unavailableReplicas: description: Total number of unavailable pods targeted by this Prometheus deployment. 
@@ -9737,4 +10528,8 @@ spec:
   served: true
   storage: true
   subresources:
+    scale:
+      labelSelectorPath: .status.selector
+      specReplicasPath: .spec.shards
+      statusReplicasPath: .status.shards
     status: {}
\ No newline at end of file
diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-operator-deployment.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-deployment.yaml
index 0ce39364b..0eede809d 100644
--- a/operators/endpointmetrics/manifests/prometheus/prometheus-operator-deployment.yaml
+++ b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-deployment.yaml
@@ -25,7 +25,7 @@ spec:
         - '--kubelet-service=kube-system/kubelet'
         - '--prometheus-config-reloader={{PROM_CONFIGMAP_RELOADER_IMG}}'
         - '--namespaces={{NAMESPACE}}'
-        image: quay.io/prometheus-operator/prometheus-operator:v0.68.0
+        image: quay.io/prometheus-operator/prometheus-operator:v0.73.2
         imagePullPolicy: IfNotPresent
         name: prometheus-operator
         ports:

From ae41f1869a36f7197443869900d011b93405fda9 Mon Sep 17 00:00:00 2001
From: Coleen Iona Quadros
Date: Thu, 2 May 2024 16:48:32 +0200
Subject: [PATCH 29/33] refactor to accommodate managed cluster

Signed-off-by: Coleen Iona Quadros
---
 tests/pkg/tests/observability_endpoint_preserve_test.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go
index c2a464077..3545c84e8 100644
--- a/tests/pkg/tests/observability_endpoint_preserve_test.go
+++ b/tests/pkg/tests/observability_endpoint_preserve_test.go
@@ -213,7 +213,10 @@ func runMetricsCollectorTests(clusterConfig utils.Cluster) {
 
 var _ = Describe("Observability:", func() {
 	for _, clusterConfig := range testOptions.ManagedClusters {
+<<<<<<< HEAD
 		klog.Error("Coleen Running metrics collector tests for cluster: ", clusterConfig.Name)
+=======
+>>>>>>> f4c3e33d (refactor to accomodate managed cluster)
 		runMetricsCollectorTests(clusterConfig)
 	}
 })

From 3d38695302663393d9f35cecf339993fb537dbee Mon Sep 17 00:00:00 2001
From: Coleen Iona Quadros
Date: Thu, 23 May 2024 12:12:08 +0200
Subject: [PATCH 30/33] remove yaml additions

Signed-off-by: Coleen Iona Quadros
---
 examples/mco/e2e/v1beta1/observability.yaml | 32 ------------------
 examples/mco/e2e/v1beta2/observability.yaml | 23 -------------
 examples/minio-tls/minio-pvc.yaml           |  2 +-
 examples/minio/minio-pvc.yaml               |  2 +-
 .../config/manager/kustomization.yaml       |  2 +-
 .../manifests/base/grafana/deployment.yaml  | 23 +++++++------
 6 files changed, 16 insertions(+), 68 deletions(-)

diff --git a/examples/mco/e2e/v1beta1/observability.yaml b/examples/mco/e2e/v1beta1/observability.yaml
index 0d55e03f6..ee59f4ce4 100644
--- a/examples/mco/e2e/v1beta1/observability.yaml
+++ b/examples/mco/e2e/v1beta1/observability.yaml
@@ -3,38 +3,6 @@ kind: MultiClusterObservability
 metadata:
   name: observability
   annotations:
-    test-env: kind-test
-    mco-thanos-without-resources-requests: true
-    mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-11-47-23
-    test-env: kind-test
-    mco-thanos-without-resources-requests: true
-    mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-04-35-15
-    test-env: kind-test
-    mco-thanos-without-resources-requests: true
-    mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24
-    mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482
-    test-env: kind-test
-    mco-thanos-without-resources-requests: true
-    mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24
- mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - test-env: kind-test - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-06-19-22-35 spec: nodeSelector: kubernetes.io/os: linux diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index c4363d5f9..b620469dc 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -3,29 +3,6 @@ kind: MultiClusterObservability metadata: name: observability annotations: - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-11-47-23 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-13-13-24 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-endpoint_monitoring_operator-image: quay.io/stolostron/endpoint-monitoring-operator:2.11.0-PR1421-486aa705da1a48b3f474b9d29515a1b58edc4482 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-07-10-49-49 - mco-thanos-without-resources-requests: true - mco-imageTagSuffix: 2.11.0-SNAPSHOT-2024-05-06-19-22-35 spec: advanced: retentionConfig: diff --git a/examples/minio-tls/minio-pvc.yaml b/examples/minio-tls/minio-pvc.yaml index 20023982d..a501dbf00 100644 --- a/examples/minio-tls/minio-pvc.yaml +++ b/examples/minio-tls/minio-pvc.yaml @@ -6,7 +6,7 @@ metadata: name: minio namespace: 
open-cluster-management-observability spec: - storageClassName: standard + storageClassName: gp3-csi accessModes: - ReadWriteOnce resources: diff --git a/examples/minio/minio-pvc.yaml b/examples/minio/minio-pvc.yaml index 20023982d..a501dbf00 100644 --- a/examples/minio/minio-pvc.yaml +++ b/examples/minio/minio-pvc.yaml @@ -6,7 +6,7 @@ metadata: name: minio namespace: open-cluster-management-observability spec: - storageClassName: standard + storageClassName: gp3-csi accessModes: - ReadWriteOnce resources: diff --git a/operators/multiclusterobservability/config/manager/kustomization.yaml b/operators/multiclusterobservability/config/manager/kustomization.yaml index 226eea9fd..ae5e515ac 100644 --- a/operators/multiclusterobservability/config/manager/kustomization.yaml +++ b/operators/multiclusterobservability/config/manager/kustomization.yaml @@ -6,6 +6,6 @@ kind: Kustomization images: - name: quay.io/stolostron/multicluster-observability-operator newName: quay.io/stolostron/multicluster-observability-operator - newTag: 2.11.0-SNAPSHOT-2024-05-14-04-35-15 + newTag: latest patches: - path: manager_webhook_patch.yaml diff --git a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml index e98490f25..ede8d8f2b 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml @@ -2,18 +2,21 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - app: multicluster-observability-grafana-test - name: grafana-test + app: multicluster-observability-grafana + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" + name: grafana namespace: open-cluster-management-observability spec: - replicas: 1 + replicas: 2 selector: matchLabels: - app: multicluster-observability-grafana-test + app: multicluster-observability-grafana + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" template: metadata: labels: - app: multicluster-observability-grafana-test + app: multicluster-observability-grafana + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" spec: affinity: podAntiAffinity: @@ -39,9 +42,9 @@ spec: containers: - args: - -config=/etc/grafana/grafana.ini - image: quay.io/stolostron/grafana:2.11.0-SNAPSHOT-2024-05-14-04-35-15 + image: quay.io/stolostron/grafana:2.4.0-SNAPSHOT-2021-09-23-07-02-14 imagePullPolicy: IfNotPresent - name: grafana-test + name: grafana ports: - containerPort: 3001 name: http @@ -69,7 +72,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: quay.io/stolostron/grafana-dashboard-loader:2.11.0-SNAPSHOT-2024-05-14-04-35-15 + image: quay.io/stolostron/grafana-dashboard-loader:2.3.0-SNAPSHOT-2021-07-26-18-43-26 imagePullPolicy: IfNotPresent resources: requests: @@ -130,11 +133,11 @@ spec: - name: grafana-datasources secret: defaultMode: 420 - secretName: grafana-datasources-test + secretName: grafana-datasources - name: grafana-config secret: defaultMode: 420 - secretName: grafana-config-test + secretName: grafana-config - name: tls-secret secret: defaultMode: 420 From c78637ec0c1252f11031efe8656326159ea25233 Mon Sep 17 00:00:00 2001 From: Coleen Iona Quadros Date: Thu, 23 May 2024 12:14:30 +0200 Subject: [PATCH 31/33] refactor Signed-off-by: Coleen Iona Quadros --- tests/pkg/tests/observability_endpoint_preserve_test.go | 6 ------ tests/run-in-kind/run-e2e-in-kind.sh 
| 2 ++ 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index 3545c84e8..30e206271 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -8,8 +8,6 @@ import ( "fmt" "os" - "k8s.io/klog/v2" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -213,10 +211,6 @@ func runMetricsCollectorTests(clusterConfig utils.Cluster) { var _ = Describe("Observability:", func() { for _, clusterConfig := range testOptions.ManagedClusters { -<<<<<<< HEAD - klog.Error("Coleen Running metrics collector tests for cluster: ", clusterConfig.Name) -======= ->>>>>>> f4c3e33d (refactor to accomodate managed cluster) runMetricsCollectorTests(clusterConfig) } }) diff --git a/tests/run-in-kind/run-e2e-in-kind.sh b/tests/run-in-kind/run-e2e-in-kind.sh index 5ab9b2621..74460cf01 100755 --- a/tests/run-in-kind/run-e2e-in-kind.sh +++ b/tests/run-in-kind/run-e2e-in-kind.sh @@ -61,3 +61,5 @@ run() { run_e2e_test } +run + From b445cd2585e3aee31d9e8f8dd648fdc87b0b1fd6 Mon Sep 17 00:00:00 2001 From: Coleen Iona Quadros Date: Thu, 23 May 2024 13:36:24 +0200 Subject: [PATCH 32/33] lint Signed-off-by: Coleen Iona Quadros --- cicd-scripts/setup-e2e-tests.sh | 45 +++++++++++++-------------- go.mod | 2 +- tests/pkg/utils/mco_managedcluster.go | 1 + tests/run-in-kind/run-e2e-in-kind.sh | 2 -- 4 files changed, 24 insertions(+), 26 deletions(-) diff --git a/cicd-scripts/setup-e2e-tests.sh b/cicd-scripts/setup-e2e-tests.sh index 30f75bb9d..f53a163a3 100755 --- a/cicd-scripts/setup-e2e-tests.sh +++ b/cicd-scripts/setup-e2e-tests.sh @@ -43,7 +43,7 @@ deploy_hub_spoke_core() { REGISTRATION_LATEST_SNAPSHOT='2.4.9-SNAPSHOT-2022-11-17-20-19-31' make cluster-ip IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT} make deploy IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT} - # wait until hub and spoke are ready + # wait until hub and spoke are ready wait_for_deployment_ready 10 60s ${HUB_NS} cluster-manager-registration-controller cluster-manager-registration-webhook cluster-manager-work-webhook wait_for_deployment_ready 10 60s ${AGENT_NS} klusterlet-registration-agent klusterlet-work-agent @@ -61,22 +61,22 @@ approve_csr_joinrequest() { for i in {1..60}; do # TODO(morvencao): remove the hard-coded cluster label # for loop for the case that multiple clusters are created - csrs=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER}) - if [[ -n ${csrs} ]]; then - csrnames=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER} -o jsonpath={.items..metadata.name}) - for csrname in ${csrnames}; do - echo "approve CSR: ${csrname}" - kubectl certificate approve ${csrname} - done - break - fi - if [[ ${i} -eq 60 ]]; then - echo "timeout wait for CSR is created." - exit 1 - fi - echo "retrying in 10s..." 
-      sleep 10
-    done
+      csrs=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER})
+      if [[ -n ${csrs} ]]; then
+        csrnames=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER} -o jsonpath={.items..metadata.name})
+        for csrname in ${csrnames}; do
+          echo "approve CSR: ${csrname}"
+          kubectl certificate approve ${csrname}
+        done
+        break
+      fi
+      if [[ ${i} -eq 60 ]]; then
+        echo "timeout wait for CSR is created."
+        exit 1
+      fi
+      echo "retrying in 10s..."
+      sleep 10
+    done
   done
 
   for i in {1..20}; do
@@ -228,7 +228,6 @@ wait_for_deployment_ready() {
 
 deploy_managed_cluster() {
   echo "Setting Kubernetes context to the managed cluster..."
-  KUBECONFIG=/tmp/managed.yaml IS_KIND_ENV=true
   kubectl config use-context kind-managed
 
   export MANAGED_CLUSTER="managed-cluster-1"
@@ -243,7 +242,7 @@
   ${SED_COMMAND} "s~clusterName: cluster1$~clusterName: ${MANAGED_CLUSTER}~g" deploy/klusterlet/config/samples/operator_open-cluster-management_klusterlets.cr.yaml
   make deploy-spoke IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT}
   wait_for_deployment_ready 10 60s ${AGENT_NS} klusterlet-registration-agent klusterlet-work-agent
- }
+}
 
 deploy_hub_and_managed_cluster() {
   cd $(dirname ${BASH_SOURCE})
@@ -269,15 +268,15 @@
 
   echo "Accept join of hub,cluster1"
   KUBECONFIG=/tmp/hub.yaml IS_KIND_ENV=true
-  clusteradm accept --context ${hubctx} --clusters ${c1},${hub_name} --skip-approve-check 
+  clusteradm accept --context ${hubctx} --clusters ${c1},${hub_name} --skip-approve-check
   kubectl get managedclusters --all-namespaces --context ${hubctx}
 }
 
 # function execute is the main routine to do the actual work
 execute() {
-# deploy_hub_spoke_core
-# approve_csr_joinrequest
-# deploy_managed_cluster
+  # deploy_hub_spoke_core
+  # approve_csr_joinrequest
+  # deploy_managed_cluster
   deploy_hub_and_managed_cluster
   deploy_mco_operator
   deploy_grafana_test
diff --git a/go.mod b/go.mod
index 94758aea6..1ac8ecf81 100644
--- a/go.mod
+++ b/go.mod
@@ -44,7 +44,6 @@
 	k8s.io/apimachinery v0.28.2
 	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/klog v1.0.0
-	k8s.io/klog/v2 v2.100.1
 	k8s.io/kubectl v0.27.2
 	open-cluster-management.io/addon-framework v0.8.1-0.20231128122622-3bfdbffb237c
 	open-cluster-management.io/api v0.12.1-0.20231130134655-97a8a92a7f30
@@ -164,6 +163,7 @@
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiserver v0.27.2 // indirect
 	k8s.io/component-base v0.27.2 // indirect
+	k8s.io/klog/v2 v2.100.1 // indirect
 	k8s.io/kube-aggregator v0.26.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
 	k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect
diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go
index 29d450402..763c85405 100644
--- a/tests/pkg/utils/mco_managedcluster.go
+++ b/tests/pkg/utils/mco_managedcluster.go
@@ -7,6 +7,7 @@ package utils
 import (
 	"context"
 	"errors"
+
 	goversion "github.com/hashicorp/go-version"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
diff --git a/tests/run-in-kind/run-e2e-in-kind.sh b/tests/run-in-kind/run-e2e-in-kind.sh
index 74460cf01..4838b46a3 100755
--- a/tests/run-in-kind/run-e2e-in-kind.sh
+++ b/tests/run-in-kind/run-e2e-in-kind.sh
@@ -23,7 +23,6 @@ create_kind_cluster() {
   export KUBECONFIG=$HOME/.kube/kind-config-$1
 }
 
-
 deploy_service_ca_operator() {
   kubectl create ns openshift-config-managed
   kubectl apply -f ${WORKDIR}/service-ca/
@@ -62,4 +61,3 @@ run() {
 }
 
 run
-

From bf28bc378fe265b8385eba0f949a67bd757847b4 Mon Sep 17 00:00:00 2001
From: Coleen Iona Quadros
Date: Thu, 23 May 2024 14:45:00 +0200
Subject: [PATCH 33/33] refactor

Signed-off-by: Coleen Iona Quadros
---
 tests/pkg/utils/mco_configmaps.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/pkg/utils/mco_configmaps.go b/tests/pkg/utils/mco_configmaps.go
index d5f7858ab..ac48179b4 100644
--- a/tests/pkg/utils/mco_configmaps.go
+++ b/tests/pkg/utils/mco_configmaps.go
@@ -12,7 +12,7 @@ import (
 	"k8s.io/klog"
 )
 
-func GetConfigMap(opt TestOptions, isHub bool, name string,
+func GetConfigMap(clusterConfig Cluster, isHub bool, name string,
 	namespace string) (error, *corev1.ConfigMap) {
 	clientKube := getKubeClientForCluster(clusterConfig, isHub)
 	cm, err := clientKube.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})