From 1f876bc926f593709b60039d253fdcecc8732553 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:12:43 +0100 Subject: [PATCH 01/58] feat: added leader election mechanism using leases --- cmd/kubedownscaler/main.go | 51 ++++++++++++ deployments/chart/templates/_helpers.tpl | 16 ++++ deployments/chart/templates/leaserole.yaml | 20 +++++ internal/api/kubernetes/client.go | 96 +++++++++++++++++++++- internal/api/kubernetes/util.go | 12 +++ 5 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 deployments/chart/templates/leaserole.yaml diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index c1a805c..46e1eee 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -6,8 +6,11 @@ import ( "fmt" "log/slog" "os" + "os/signal" "regexp" "sync" + "sync/atomic" + "syscall" "time" _ "time/tzdata" @@ -43,6 +46,8 @@ var ( timeAnnotation string // optional kubeconfig to use for testing purposes instead of the in-cluster config kubeconfig string + // isLeader indicates if the current replica is the leader + isLeader atomic.Bool ) func init() { @@ -97,6 +102,11 @@ func main() { slog.Error("found incompatible fields", "error", err) os.Exit(1) } + downscalerNamespace, err := kubernetes.GetCurrentNamespaceFromFile() + if err != nil { + slog.Error("failed to get downscaler namespace", "error", err) + os.Exit(1) + } ctx := context.Background() slog.Debug("getting client for kubernetes") @@ -106,8 +116,49 @@ os.Exit(1) } + // leader election and graceful termination + go func() { + // create a context to handle termination gracefully + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // listen for termination signals in a separate goroutine + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) + + // Goroutine for leader election and lease renewal + go func() { + err := client.CreateOrUpdateLease(ctx, downscalerNamespace, &isLeader) + if err != nil { + slog.Error("failed to acquire lease", "error", err) + os.Exit(1) + } + }() + + // pause and wait for termination signal + <-sigs + slog.Debug("received termination signal, deleting lease") + + // delete the lease after termination signal is intercepted + err := client.DeleteLease(ctx, downscalerNamespace, &isLeader) + if err != nil { + slog.Error("failed to delete lease", "error", err) + } else { + slog.Debug("lease deleted successfully") + } + + // cancel the context to stop the lease renewal goroutine and exit the main process + cancel() + os.Exit(1) + }() + slog.Info("started downscaler") for { + if !isLeader.Load() { + slog.Debug("not the leader, skipping workload scanning") + time.Sleep(5 * time.Second) // sleep briefly before checking leadership again + continue + } slog.Info("scanning workloads") workloads, err := client.GetWorkloads(includeNamespaces, includeResources, ctx) diff --git a/deployments/chart/templates/_helpers.tpl b/deployments/chart/templates/_helpers.tpl index 058f3a8..e4c073c 100644 --- a/deployments/chart/templates/_helpers.tpl +++ b/deployments/chart/templates/_helpers.tpl @@ -267,3 +267,19 @@ Create defined permissions for roles {{- end }} {{- end }} {{- end }} +{{/* +Create defined permissions for lease role +*/}} +{{- define "go-kube-downscaler.leases.permissions" -}} +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - watch + - list + - update + - delete +{{- end -}} diff --git a/deployments/chart/templates/leaserole.yaml
b/deployments/chart/templates/leaserole.yaml new file mode 100644 index 0000000..322352f --- /dev/null +++ b/deployments/chart/templates/leaserole.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "go-kube-downscaler.fullname" . }}-lease-role + namespace: {{ .Release.Namespace }} +rules: {{ include "go-kube-downscaler.leases.permissions" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "go-kube-downscaler.fullname" . }}-lease-rolebinding + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: {{ include "go-kube-downscaler.serviceAccountName" . }} +roleRef: + kind: Role + name: {{ include "go-kube-downscaler.fullname" . }}-lease-role + apiGroup: rbac.authorization.k8s.io diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index f6b18d7..55ffb28 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -6,20 +6,27 @@ import ( "errors" "fmt" "log/slog" + "os" "strings" + "sync/atomic" + "time" argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" "github.com/caas-team/gokubedownscaler/internal/pkg/scalable" keda "github.com/kedacore/keda/v2/pkg/generated/clientset/versioned" monitoring "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" zalando "github.com/zalando-incubator/stackset-controller/pkg/clientset" + coordv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) const ( - componentName = "kubedownscaler" + componentName = "kubedownscaler" + leaseName = "downscaler-lease" + leaseDuration = 30 * time.Second + leaseCheckSleepDuration = leaseDuration / 2 ) var errResourceNotSupported = errors.New("error: specified rescource type is not supported") @@ -36,6 +43,10 @@ type Client interface { UpscaleWorkload(workload scalable.Workload, ctx context.Context) error // addWorkloadEvent creates a new event on the workload addWorkloadEvent(eventType string, reason string, id string, message string, workload scalable.Workload, ctx context.Context) error + // CreateOrUpdateLease creates or updates the downscaler lease + CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error + // DeleteLease deletes the downscaler lease + DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error } // NewClient makes a new Client @@ -207,3 +218,86 @@ func (c client) addWorkloadEvent(eventType, reason, id, message string, workload } return nil } + +// CreateOrUpdateLease attempts to acquire and maintain a lease for leadership.
+func (c client) CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { + // get hostname for holder identity + holderIdentity, err := os.Hostname() + if err != nil { + slog.Error("failed to get hostname", "error", err) + return err + } + + leasesClient := c.clientsets.Kubernetes.CoordinationV1().Leases(leaseNamespace) + leaseDurationSeconds := int32(leaseDuration.Seconds()) + + for { + // lease object + lease := &coordv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: leaseName, + Namespace: leaseNamespace, + }, + Spec: coordv1.LeaseSpec{ + HolderIdentity: &holderIdentity, + LeaseDurationSeconds: &leaseDurationSeconds, + RenewTime: &metav1.MicroTime{Time: time.Now()}, + }, + } + + // search for an existing lease inside the namespace + existingLease, err := leasesClient.Get(ctx, leaseName, metav1.GetOptions{}) + if err != nil { + // create a new lease if one doesn't exist, then jump to the next iteration + slog.Debug("creating new lease", "lease", leaseName, "namespace", leaseNamespace) + _, err = leasesClient.Create(ctx, lease, metav1.CreateOptions{}) + if err != nil { + slog.Error("failed to create lease", "error", err) + time.Sleep(leaseCheckSleepDuration) + continue + } + slog.Debug("acquired lease", "holder", holderIdentity, "namespace", leaseNamespace) + isLeader.Store(true) + } else { + // check if the existing lease has expired or is held by another pod; if it is held by another pod, jump to the next iteration + if existingLease.Spec.RenewTime != nil && + time.Since(existingLease.Spec.RenewTime.Time) < leaseDuration { + if *existingLease.Spec.HolderIdentity != holderIdentity { + slog.Debug("lease already held by another", "holder", *existingLease.Spec.HolderIdentity) + isLeader.Store(false) + time.Sleep(leaseCheckSleepDuration) + continue + } + } + + // update the lease if it is currently held by this pod + existingLease.Spec.HolderIdentity = &holderIdentity + existingLease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()} + _, err = leasesClient.Update(ctx, existingLease, metav1.UpdateOptions{}) + if err != nil { + slog.Error("failed to update lease", "error", err) + time.Sleep(leaseCheckSleepDuration) + continue + } + slog.Debug("lease renewed", "holder", holderIdentity, "namespace", leaseNamespace) + isLeader.Store(true) + } + + // sleep before renewing + time.Sleep(leaseCheckSleepDuration) + } +} + +func (c client) DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { + leasesClient := c.clientsets.Kubernetes.CoordinationV1().Leases(leaseNamespace) + + err := leasesClient.Delete(ctx, leaseName, metav1.DeleteOptions{}) + if err != nil { + slog.Error("failed to delete lease %s in namespace %s", leaseName, leaseNamespace) + return err + } + + isLeader.Store(false) + slog.Debug("deleted lease %s in namespace %s", leaseName, leaseNamespace) + return nil +} diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go index 499f418..508e80b 100644 --- a/internal/api/kubernetes/util.go +++ b/internal/api/kubernetes/util.go @@ -1,8 +1,10 @@ package kubernetes import ( + "fmt" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "os" ) // getConfig gets a rest.Config for the specified kubeconfig or if empty from the in-cluster config @@ -12,3 +14,13 @@ func getConfig(kubeconfig string) (*rest.Config, error) { } return clientcmd.BuildConfigFromFlags("", kubeconfig) } + +// GetCurrentNamespaceFromFile retrieves the downscaler namespace from its service account file +func 
GetCurrentNamespaceFromFile() (string, error) { + namespaceFile := "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + namespace, err := os.ReadFile(namespaceFile) + if err != nil { + return "", fmt.Errorf("failed to read namespace file: %v", err) + } + return string(namespace), nil +} From 7e0dd4c84c3245a770687b42418ac0ae9e8be685 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Dec 2024 12:16:15 +0000 Subject: [PATCH 02/58] chore: automatically push pre-commit changes --- internal/api/kubernetes/util.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go index 508e80b..83d9f80 100644 --- a/internal/api/kubernetes/util.go +++ b/internal/api/kubernetes/util.go @@ -2,9 +2,10 @@ package kubernetes import ( "fmt" + "os" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "os" ) // getConfig gets a rest.Config for the specified kubeconfig or if empty from the in-cluster config From e00efc1423eb52a620ba565d5f478450651f1424 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:32:08 +0100 Subject: [PATCH 03/58] fix: prevent non-leaders from deleting the lease during graceful termination --- cmd/kubedownscaler/main.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 46e1eee..ea36da8 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -126,7 +126,7 @@ func main() { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) - // Goroutine for leader election and lease renewal + // Goroutine for leader election go func() { err := client.CreateOrUpdateLease(ctx, downscalerNamespace, &isLeader) if err != nil { @@ -137,17 +137,19 @@ func main() { // pause and wait for termination signal <-sigs - slog.Debug("received termination signal, deleting lease") + if isLeader.Load() { + slog.Debug("received termination signal, deleting lease") - // delete the lease after termination signal is intercepted - err := client.DeleteLease(ctx, downscalerNamespace, &isLeader) - if err != nil { - slog.Error("failed to delete lease", "error", err) - } else { - slog.Debug("lease deleted successfully") + // delete the lease after termination signal is intercepted + err := client.DeleteLease(ctx, downscalerNamespace, &isLeader) + if err != nil { + slog.Error("failed to delete lease", "error", err) + } else { + slog.Debug("lease deleted successfully") + } } - // cancel the context to stop the lease renewal goroutine and exit the main process + // cancel the context to stop the leader election goroutine and exit the main process cancel() os.Exit(1) }() From 36009d166ca5da0a1087092db6ee48c28ac86b02 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Wed, 18 Dec 2024 19:46:12 +0100 Subject: [PATCH 04/58] refactor: aligned logs inside DeleteLease function to the common style --- internal/api/kubernetes/client.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index 55ffb28..c560d22 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -293,11 +293,11 @@ func (c client) DeleteLease(ctx context.Context, leaseNamespace string, isLeader err := leasesClient.Delete(ctx, leaseName, metav1.DeleteOptions{}) if err != nil { - 
slog.Error("failed to delete lease %s in namespace %s", leaseName, leaseNamespace) + slog.Error("failed to delete lease", "lease", leaseName, "namespace", leaseNamespace) return err } isLeader.Store(false) - slog.Debug("deleted lease %s in namespace %s", leaseName, leaseNamespace) + slog.Debug("deleted lease", "lease", leaseName, "namespace", leaseNamespace) return nil } From 1c7c548a5b9b9ca0095e9626311fa366c73fb7a8 Mon Sep 17 00:00:00 2001 From: Jan <157487559+JTaeuber@users.noreply.github.com> Date: Fri, 3 Jan 2025 11:47:18 +0100 Subject: [PATCH 05/58] fix: make workflows run for forks (#77) --- .github/workflows/auto_assign_author.yaml | 2 +- .github/workflows/check_for_release.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/auto_assign_author.yaml b/.github/workflows/auto_assign_author.yaml index 7837812..97fadb7 100644 --- a/.github/workflows/auto_assign_author.yaml +++ b/.github/workflows/auto_assign_author.yaml @@ -1,7 +1,7 @@ name: Auto Assign Author on: - pull_request: + pull_request_target: types: [opened, reopened] permissions: diff --git a/.github/workflows/check_for_release.yaml b/.github/workflows/check_for_release.yaml index 98117ee..91c6961 100644 --- a/.github/workflows/check_for_release.yaml +++ b/.github/workflows/check_for_release.yaml @@ -1,7 +1,7 @@ name: Check for new release on: - pull_request: + pull_request_target: types: [opened, synchronize, labeled, unlabeled] jobs: From 936f30cc44150a8999b46bc1cdabbf267a4fad85 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Jan 2025 14:33:04 +0100 Subject: [PATCH 06/58] chore(deps): bump golang.org/x/net from 0.28.0 to 0.33.0 (#82) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.28.0 to 0.33.0. - [Commits](https://github.com/golang/net/compare/v0.28.0...v0.33.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index eca0382..dee229f 100644 --- a/go.mod +++ b/go.mod @@ -53,11 +53,11 @@ require ( github.com/szuecs/routegroup-client v0.28.2 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.22.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.6.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.34.2 // indirect diff --git a/go.sum b/go.sum index 8edaf42..6af9b89 100644 --- a/go.sum +++ b/go.sum @@ -280,8 +280,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -294,8 +294,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -327,8 +327,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -343,8 +343,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -358,8 +358,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= From 7b184220487ae7707f28278e64d2f5aabcb59958 Mon Sep 17 00:00:00 2001 From: Jonathan Mayer Date: Thu, 9 Jan 2025 14:39:09 +0100 Subject: [PATCH 07/58] chore: add dependabot config (#83) --- .github/dependabot.yml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..3dd84da --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,30 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + labels: + - "housekeeping" + - "dependencies" + commit-message: + prefix: "chore" + include: "scope" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + labels: + - "housekeeping" + commit-message: + prefix: "chore" + include: "scope" + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "daily" + labels: + - "housekeeping" + commit-message: + prefix: "chore" + include: "scope" From cc4a1bb65f0901d92ca24b1e4b1fd200bdc4d11e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 06:51:28 +0100 Subject: [PATCH 08/58] chore(deps): bump docker/build-push-action from 5 to 6 (#84) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6. 
- [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker_build.yaml b/.github/workflows/docker_build.yaml index cd478e1..200b3da 100644 --- a/.github/workflows/docker_build.yaml +++ b/.github/workflows/docker_build.yaml @@ -48,7 +48,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 id: build-push with: context: . From 9be9bcb332896a926941fb10dfbadfdc0e64f88f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 06:51:57 +0100 Subject: [PATCH 09/58] chore(deps): bump actions/setup-go from 5.0.2 to 5.2.0 (#85) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.2 to 5.2.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v5.0.2...v5.2.0) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/pre_commit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pre_commit.yaml b/.github/workflows/pre_commit.yaml index 8859a85..8b278b7 100644 --- a/.github/workflows/pre_commit.yaml +++ b/.github/workflows/pre_commit.yaml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Setup Go environment - uses: actions/setup-go@v5.0.2 + uses: actions/setup-go@v5.2.0 with: go-version-file: "go.mod" - name: Install dependencies From b216b838405301c75a8d4711ea7b89bce5bb292a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 07:03:48 +0100 Subject: [PATCH 10/58] chore(deps): bump golang from 1.23.1 to 1.23.4 (#91) * chore(deps): bump golang from 1.23.1 to 1.23.4 Bumps golang from 1.23.1 to 1.23.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * chore: bump golang from 1.23.1 to 1.23.4 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer --- Dockerfile | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8d26855..4a00b23 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23.1 AS build +FROM golang:1.23.4 AS build WORKDIR /tmp/kubedownscaler diff --git a/go.mod b/go.mod index dee229f..67e3704 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/caas-team/gokubedownscaler -go 1.23.1 +go 1.23.4 require ( github.com/argoproj/argo-rollouts v1.7.2 From ac13996e5e3b3d27b56defbfcd6c64c77da43f8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 07:19:20 +0100 Subject: [PATCH 11/58] chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 (#89) * chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 Bumps [k8s.io/api](https://github.com/kubernetes/api) from 0.31.0 to 0.32.0. - [Commits](https://github.com/kubernetes/api/compare/v0.31.0...v0.32.0) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * fix: bump client-go to match with other dependencies --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer --- go.mod | 24 +++++++++++------------- go.sum | 59 ++++++++++++++++++++++++++++------------------------------ 2 files changed, 39 insertions(+), 44 deletions(-) diff --git a/go.mod b/go.mod index 67e3704..6289cb2 100644 --- a/go.mod +++ b/go.mod @@ -9,9 +9,9 @@ require ( github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2 github.com/stretchr/testify v1.9.0 github.com/zalando-incubator/stackset-controller v1.4.84 - k8s.io/api v0.31.0 - k8s.io/apimachinery v0.31.0 - k8s.io/client-go v0.31.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 ) require ( @@ -28,13 +28,11 @@ require ( github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.9 // indirect @@ -54,23 +52,23 @@ require ( github.com/x448/float16 v0.8.4 // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/time v0.7.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 
v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.31.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 // indirect - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653 // indirect sigs.k8s.io/controller-runtime v0.19.0 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 6af9b89..562fc16 100644 --- a/go.sum +++ b/go.sum @@ -59,7 +59,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -92,8 +91,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -103,8 +102,6 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -156,8 +153,8 @@ github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxm github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= -github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -172,8 +169,8 @@ github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfad github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -283,8 +280,8 @@ golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -361,8 +358,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 
h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -378,8 +375,8 @@ golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -400,8 +397,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -416,23 +413,22 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.28.7/go.mod h1:y4RbcjCCMff1930SG/TcP3AUKNfaJUgIeUp58e/2vyY= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod 
h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= k8s.io/apimachinery v0.28.7/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= +k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/client-go v0.28.7/go.mod h1:xIoEaDewZ+EwWOo1/F1t0IOKMPe1rwBZhLu9Es6y0tE= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= k8s.io/code-generator v0.28.7/go.mod h1:IaYGMqYjgj0zE3L9mnHo7hIL9GkY08GvGyyracaIxTA= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -443,21 +439,22 @@ k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 h1:1Wof1cGQgA5pqgo8MxKPtf+qN6Sh/0JzznmeGPm1HnE= -k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8/go.mod h1:Os6V6dZwLNii3vxFpxcNaTmH8LJJBkOTg1N0tOA0fvA= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653 h1:VHUW124ZpkDn4EnIzMuGWvGuJte3ISIoHMmEw2kx0zU= knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653/go.mod h1:H+5rS2GEWpAZzrmQoXOEVq/1M77LLMhR7+4jZBMOQ24= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= 
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From ec485c1b5d94bc160301ab9def37dbd9161f90f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 07:38:58 +0100 Subject: [PATCH 12/58] chore(deps): bump github.com/kedacore/keda/v2 from 2.15.1 to 2.16.1 (#90) * chore(deps): bump github.com/kedacore/keda/v2 from 2.15.1 to 2.16.1 Bumps [github.com/kedacore/keda/v2](https://github.com/kedacore/keda) from 2.15.1 to 2.16.1. - [Release notes](https://github.com/kedacore/keda/releases) - [Changelog](https://github.com/kedacore/keda/blob/main/CHANGELOG.md) - [Commits](https://github.com/kedacore/keda/compare/v2.15.1...v2.16.1) --- updated-dependencies: - dependency-name: github.com/kedacore/keda/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * chore: automatically push pre-commit changes * Merge remote-tracking branch 'origin/main' * Squashed commit of the following: commit 5cf9b47e93ccca874aabbbf5bec25e3fdebef280 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 07:19:20 2025 +0100 chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 (#89) * chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 Bumps [k8s.io/api](https://github.com/kubernetes/api) from 0.31.0 to 0.32.0. - [Commits](https://github.com/kubernetes/api/compare/v0.31.0...v0.32.0) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * fix: bump client-go to match with other dependencies --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer commit 7c5dc0864e10ce8ea283948058e97b9cd47c7818 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 07:03:48 2025 +0100 chore(deps): bump golang from 1.23.1 to 1.23.4 (#91) * chore(deps): bump golang from 1.23.1 to 1.23.4 Bumps golang from 1.23.1 to 1.23.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * chore: bump golang from 1.23.1 to 1.23.4 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer commit efe5db1f0f9a8d5bcc5701526966f0dc58bb89ab Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 06:51:57 2025 +0100 chore(deps): bump actions/setup-go from 5.0.2 to 5.2.0 (#85) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.2 to 5.2.0. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v5.0.2...v5.2.0) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> commit 952f8b296e4c332f3bb619e61d9c9b5bdf6ca5df Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 06:51:28 2025 +0100 chore(deps): bump docker/build-push-action from 5 to 6 (#84) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Squashed commit of the following: commit 5cf9b47e93ccca874aabbbf5bec25e3fdebef280 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 07:19:20 2025 +0100 chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 (#89) * chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 Bumps [k8s.io/api](https://github.com/kubernetes/api) from 0.31.0 to 0.32.0. - [Commits](https://github.com/kubernetes/api/compare/v0.31.0...v0.32.0) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * fix: bump client-go to match with other dependencies --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer commit 7c5dc0864e10ce8ea283948058e97b9cd47c7818 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 07:03:48 2025 +0100 chore(deps): bump golang from 1.23.1 to 1.23.4 (#91) * chore(deps): bump golang from 1.23.1 to 1.23.4 Bumps golang from 1.23.1 to 1.23.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * chore: bump golang from 1.23.1 to 1.23.4 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer commit efe5db1f0f9a8d5bcc5701526966f0dc58bb89ab Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 06:51:57 2025 +0100 chore(deps): bump actions/setup-go from 5.0.2 to 5.2.0 (#85) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.2 to 5.2.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v5.0.2...v5.2.0) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> commit 952f8b296e4c332f3bb619e61d9c9b5bdf6ca5df Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 06:51:28 2025 +0100 chore(deps): bump docker/build-push-action from 5 to 6 (#84) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] Co-authored-by: jonathan-mayer --- go.mod | 44 ++++++++++++------------ go.sum | 107 +++++++++++++++++++++++++++++---------------------------- 2 files changed, 76 insertions(+), 75 deletions(-) diff --git a/go.mod b/go.mod index 6289cb2..fc13331 100644 --- a/go.mod +++ b/go.mod @@ -4,10 +4,10 @@ go 1.23.4 require ( github.com/argoproj/argo-rollouts v1.7.2 - github.com/kedacore/keda/v2 v2.15.1 + github.com/kedacore/keda/v2 v2.16.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/zalando-incubator/stackset-controller v1.4.84 k8s.io/api v0.32.0 k8s.io/apimachinery v0.32.0 @@ -21,7 +21,7 @@ require ( github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/expr-lang/expr v1.16.9 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -29,46 +29,46 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.20.0 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.61.0 // indirect github.com/prometheus/procfs v0.15.1 // 
indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/szuecs/routegroup-client v0.28.2 // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/time v0.9.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/apiextensions-apiserver v0.32.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653 // indirect - sigs.k8s.io/controller-runtime v0.19.0 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect + k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect + knative.dev/pkg v0.0.0-20250109201817-83cd52ed87d9 // indirect + sigs.k8s.io/controller-runtime v0.19.4 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 562fc16..d868338 100644 --- a/go.sum +++ b/go.sum @@ -29,8 +29,8 @@ github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -75,8 +75,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= -github.com/google/gnostic-models 
v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -91,8 +91,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -106,12 +106,12 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kedacore/keda/v2 v2.15.1 h1:Kb3woYuCeCPICH037vTIcUopXgOYpdP2qa+CmHgV3SE= -github.com/kedacore/keda/v2 v2.15.1/go.mod h1:2umVEoNgklKt0+q+7BEEbrSgxqh+KPjyh6vnKXt3sls= +github.com/kedacore/keda/v2 v2.16.1 h1:LfYsxfSX8DjetLW8q9qnriImH936POrQJvE+caRoScI= +github.com/kedacore/keda/v2 v2.16.1/go.mod h1:pO2ksUCwSOQ2u3OWqj+jh9Hgf0+26MZug6dF7WWgcAk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -122,8 +122,9 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mailru/easyjson v0.7.7 
h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -153,8 +154,8 @@ github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxm github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= +github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= @@ -169,8 +170,8 @@ github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfad github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -182,12 +183,12 @@ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 h github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM= github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2 h1:yncs8NglhE3hB+viNsabCAF9TBBDOBljHUyxHC5fSGY= github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2/go.mod h1:AfbzyEUFxJmSoTiMcgNHHjDKcorBVd9TIwx0viURgEw= -github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= -github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= @@ -209,8 +210,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/szuecs/routegroup-client v0.28.2 h1:Dk9D6VqhtYM0IVRkik0fpZ5IbVrf1mHssYmAyRrwehU= github.com/szuecs/routegroup-client v0.28.2/go.mod h1:QpI/XGdncIAYIE03Nwjq0w+NXlIfV/n56BI1uR2a2Do= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -236,8 +237,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= @@ -249,8 +250,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -277,11 +278,11 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -324,8 +325,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -340,8 +341,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -358,8 +359,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 
h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -375,8 +376,8 @@ golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -397,8 +398,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -421,8 +422,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.28.7/go.mod h1:y4RbcjCCMff1930SG/TcP3AUKNfaJUgIeUp58e/2vyY= k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod 
h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= k8s.io/apimachinery v0.28.7/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= @@ -439,22 +440,22 @@ k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653 h1:VHUW124ZpkDn4EnIzMuGWvGuJte3ISIoHMmEw2kx0zU= -knative.dev/pkg v0.0.0-20240805063731-c88d5dad9653/go.mod h1:H+5rS2GEWpAZzrmQoXOEVq/1M77LLMhR7+4jZBMOQ24= -sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= -sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +knative.dev/pkg v0.0.0-20250109201817-83cd52ed87d9 h1:b4S5OUBLwlbfC9Twr+4AfEcH7zK8CKUdjdyOTirfvoU= +knative.dev/pkg v0.0.0-20250109201817-83cd52ed87d9/go.mod h1:C1u0e6tMiEkqcKsurZn2wGTH6utcTbODFwJBPyZ56lA= +sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= +sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.2.0/go.mod 
h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From 89afe66cf65e90e737cf9c5c6d7edf225c5b53f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 07:47:01 +0100 Subject: [PATCH 13/58] chore(deps): bump github.com/prometheus-operator/prometheus-operator/pkg/client (#87) Bumps [github.com/prometheus-operator/prometheus-operator/pkg/client](https://github.com/prometheus-operator/prometheus-operator) from 0.76.2 to 0.79.2. - [Release notes](https://github.com/prometheus-operator/prometheus-operator/releases) - [Changelog](https://github.com/prometheus-operator/prometheus-operator/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus-operator/prometheus-operator/compare/v0.76.2...v0.79.2) --- updated-dependencies: - dependency-name: github.com/prometheus-operator/prometheus-operator/pkg/client dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index fc13331..41eadf4 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.23.4 require ( github.com/argoproj/argo-rollouts v1.7.2 github.com/kedacore/keda/v2 v2.16.1 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 - github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 + github.com/prometheus-operator/prometheus-operator/pkg/client v0.79.2 github.com/stretchr/testify v1.10.0 github.com/zalando-incubator/stackset-controller v1.4.84 k8s.io/api v0.32.0 diff --git a/go.sum b/go.sum index d868338..3449ee8 100644 --- a/go.sum +++ b/go.sum @@ -179,10 +179,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 h1:BpGDC87A2SaxbKgONsFLEX3kRcRJee2aLQbjXsuz0hA= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM= -github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2 h1:yncs8NglhE3hB+viNsabCAF9TBBDOBljHUyxHC5fSGY= -github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2/go.mod h1:AfbzyEUFxJmSoTiMcgNHHjDKcorBVd9TIwx0viURgEw= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 h1:DGv150w4UyxnjNHlkCw85R3+lspOxegtdnbpP2vKRrk= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2/go.mod h1:AVMP4QEW8xuGWnxaWSpI3kKjP9fDA31nO68zsyREJZA= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.79.2 h1:wUMuHTC069Ayy+0/srqD5OrLVP/QRhSCUR/7SJ8tSqQ= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.79.2/go.mod h1:671/KciyzKiTmvIYTpp7CzWD1/TNXVPgeDLJcGFWrOM= github.com/prometheus/client_golang v1.20.5 
h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= From 6d3258a25ede190ea96d47aebef62bdebefdef75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 08:21:37 +0100 Subject: [PATCH 14/58] chore(deps): bump github.com/zalando-incubator/stackset-controller from 1.4.84 to 1.4.92 (#94) * chore(deps): bump github.com/zalando-incubator/stackset-controller Bumps [github.com/zalando-incubator/stackset-controller](https://github.com/zalando-incubator/stackset-controller) from 1.4.84 to 1.4.92. - [Release notes](https://github.com/zalando-incubator/stackset-controller/releases) - [Commits](https://github.com/zalando-incubator/stackset-controller/compare/v1.4.84...v1.4.92) --- updated-dependencies: - dependency-name: github.com/zalando-incubator/stackset-controller dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * chore: bump deps dependabot didnt --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer --- go.mod | 5 +++-- go.sum | 11 ++++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 41eadf4..358dfc9 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 github.com/prometheus-operator/prometheus-operator/pkg/client v0.79.2 github.com/stretchr/testify v1.10.0 - github.com/zalando-incubator/stackset-controller v1.4.84 + github.com/zalando-incubator/stackset-controller v1.4.92 k8s.io/api v0.32.0 k8s.io/apimachinery v0.32.0 k8s.io/client-go v0.32.0 @@ -46,6 +46,7 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.61.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/szuecs/routegroup-client v0.28.2 // indirect @@ -66,7 +67,7 @@ require ( k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect - knative.dev/pkg v0.0.0-20250109201817-83cd52ed87d9 // indirect + knative.dev/pkg v0.0.0-20250110150618-accfe3649188 // indirect sigs.k8s.io/controller-runtime v0.19.4 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect diff --git a/go.sum b/go.sum index 3449ee8..5dc30ec 100644 --- a/go.sum +++ b/go.sum @@ -193,8 +193,9 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal 
v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -220,8 +221,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zalando-incubator/stackset-controller v1.4.84 h1:jyQi1iLOu5TA5G112nNwPAKKZOdWQO/DzxMQq/AE0QI= -github.com/zalando-incubator/stackset-controller v1.4.84/go.mod h1:PDy2PY2eqkUehNVHxh/VtxJbseVcwvYUOuafc2DCZzY= +github.com/zalando-incubator/stackset-controller v1.4.92 h1:Q4wA+cn0eqW5U9qhm061LxZbj39XR3L8/c2vKozkvmE= +github.com/zalando-incubator/stackset-controller v1.4.92/go.mod h1:29DAYtU9LMRzSqXLzOfQg7bQYvNNPEWUhKc5dvBBSK0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -446,8 +447,8 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -knative.dev/pkg v0.0.0-20250109201817-83cd52ed87d9 h1:b4S5OUBLwlbfC9Twr+4AfEcH7zK8CKUdjdyOTirfvoU= -knative.dev/pkg v0.0.0-20250109201817-83cd52ed87d9/go.mod h1:C1u0e6tMiEkqcKsurZn2wGTH6utcTbODFwJBPyZ56lA= +knative.dev/pkg v0.0.0-20250110150618-accfe3649188 h1:xM2blxCAN0VzKQPYqeq2jNBL7xN6Iyn1avs+Ib+ogaM= +knative.dev/pkg v0.0.0-20250110150618-accfe3649188/go.mod h1:C1u0e6tMiEkqcKsurZn2wGTH6utcTbODFwJBPyZ56lA= sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= From f646d954c66135657e84317f2fa3dd01028a4ca2 Mon Sep 17 00:00:00 2001 From: Jonathan Mayer Date: Mon, 13 Jan 2025 09:35:27 +0100 Subject: [PATCH 15/58] perf: reduce memory allocations (#81) * perf: reduce memory allocations * perf: avoid preallocating the slice with zero values * perf: avoid temporary copies in get resource functions --- internal/api/kubernetes/client.go | 2 +- internal/pkg/scalable/cronjobs.go | 6 +++--- internal/pkg/scalable/daemonsets.go | 6 +++--- internal/pkg/scalable/deployments.go | 6 +++--- internal/pkg/scalable/horizontalpodautoscalers.go | 6 +++--- internal/pkg/scalable/jobs.go | 6 +++--- internal/pkg/scalable/poddisruptionbudgets.go | 6 +++--- internal/pkg/scalable/prometheuses.go | 6 +++--- internal/pkg/scalable/rollouts.go | 6 +++--- internal/pkg/scalable/scaledobjects.go | 6 +++--- internal/pkg/scalable/stacks.go | 6 +++--- internal/pkg/scalable/statefulsets.go | 6 +++--- internal/pkg/values/regexList.go | 1 + internal/pkg/values/stringlistValue.go | 5 ++--- internal/pkg/values/timespan.go | 2 +- 15 files changed, 38 insertions(+), 38 deletions(-) diff 
--git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go
index c560d22..c24a0e9 100644
--- a/internal/api/kubernetes/client.go
+++ b/internal/api/kubernetes/client.go
@@ -106,7 +106,7 @@ func (c client) GetNamespaceAnnotations(namespace string, ctx context.Context) (
 func (c client) GetWorkloads(namespaces []string, resourceTypes []string, ctx context.Context) ([]scalable.Workload, error) {
 	var results []scalable.Workload
 	if namespaces == nil {
-		namespaces = append(namespaces, "")
+		namespaces = []string{""}
 	}
 	for _, namespace := range namespaces {
 		for _, resourceType := range resourceTypes {
diff --git a/internal/pkg/scalable/cronjobs.go b/internal/pkg/scalable/cronjobs.go
index 165ef91..8210ff2 100644
--- a/internal/pkg/scalable/cronjobs.go
+++ b/internal/pkg/scalable/cronjobs.go
@@ -10,13 +10,13 @@ import (
 
 // getCronJobs is the getResourceFunc for CronJobs
 func getCronJobs(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	cronjobs, err := clientsets.Kubernetes.BatchV1().CronJobs(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get cronjobs: %w", err)
 	}
-	for _, item := range cronjobs.Items {
-		results = append(results, &suspendScaledWorkload{&cronJob{&item}})
+	results := make([]Workload, 0, len(cronjobs.Items))
+	for i := range cronjobs.Items {
+		results = append(results, &suspendScaledWorkload{&cronJob{&cronjobs.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/daemonsets.go b/internal/pkg/scalable/daemonsets.go
index dd27ac5..3630991 100644
--- a/internal/pkg/scalable/daemonsets.go
+++ b/internal/pkg/scalable/daemonsets.go
@@ -14,13 +14,13 @@ const (
 
 // getDaemonSets is the getResourceFunc for DaemonSets
 func getDaemonSets(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	daemonsets, err := clientsets.Kubernetes.AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get daemonsets: %w", err)
 	}
-	for _, item := range daemonsets.Items {
-		results = append(results, &daemonSet{&item})
+	results := make([]Workload, 0, len(daemonsets.Items))
+	for i := range daemonsets.Items {
+		results = append(results, &daemonSet{&daemonsets.Items[i]})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/deployments.go b/internal/pkg/scalable/deployments.go
index c54a08e..0377013 100644
--- a/internal/pkg/scalable/deployments.go
+++ b/internal/pkg/scalable/deployments.go
@@ -10,13 +10,13 @@ import (
 
 // getDeployments is the getResourceFunc for Deployments
 func getDeployments(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	deployments, err := clientsets.Kubernetes.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get deployments: %w", err)
 	}
-	for _, item := range deployments.Items {
-		results = append(results, &replicaScaledWorkload{&deployment{&item}})
+	results := make([]Workload, 0, len(deployments.Items))
+	for i := range deployments.Items {
+		results = append(results, &replicaScaledWorkload{&deployment{&deployments.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/horizontalpodautoscalers.go b/internal/pkg/scalable/horizontalpodautoscalers.go
index 4daffcd..e891768 100644
--- a/internal/pkg/scalable/horizontalpodautoscalers.go
+++ b/internal/pkg/scalable/horizontalpodautoscalers.go
@@ -13,13 +13,13 @@ var errMinReplicasBoundsExceeded = errors.New("error: a HPAs minReplicas can onl
 
 // getHorizontalPodAutoscalers is the getResourceFunc for horizontalPodAutoscalers
 func getHorizontalPodAutoscalers(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	hpas, err := clientsets.Kubernetes.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get horizontalpodautoscalers: %w", err)
 	}
-	for _, item := range hpas.Items {
-		results = append(results, &replicaScaledWorkload{&horizontalPodAutoscaler{&item}})
+	results := make([]Workload, 0, len(hpas.Items))
+	for i := range hpas.Items {
+		results = append(results, &replicaScaledWorkload{&horizontalPodAutoscaler{&hpas.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/jobs.go b/internal/pkg/scalable/jobs.go
index 83518e1..67cf14c 100644
--- a/internal/pkg/scalable/jobs.go
+++ b/internal/pkg/scalable/jobs.go
@@ -10,13 +10,13 @@ import (
 
 // getDeployments is the getResourceFunc for Jobs
 func getJobs(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	jobs, err := clientsets.Kubernetes.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get jobs: %w", err)
 	}
-	for _, item := range jobs.Items {
-		results = append(results, &suspendScaledWorkload{&job{&item}})
+	results := make([]Workload, 0, len(jobs.Items))
+	for i := range jobs.Items {
+		results = append(results, &suspendScaledWorkload{&job{&jobs.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/poddisruptionbudgets.go b/internal/pkg/scalable/poddisruptionbudgets.go
index 93e3714..9852df6 100644
--- a/internal/pkg/scalable/poddisruptionbudgets.go
+++ b/internal/pkg/scalable/poddisruptionbudgets.go
@@ -14,13 +14,13 @@ import (
 
 // getPodDisruptionBudgets is the getResourceFunc for podDisruptionBudget
 func getPodDisruptionBudgets(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	poddisruptionbudgets, err := clientsets.Kubernetes.PolicyV1().PodDisruptionBudgets(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get poddisruptionbudgets: %w", err)
 	}
-	for _, item := range poddisruptionbudgets.Items {
-		results = append(results, &podDisruptionBudget{&item})
+	results := make([]Workload, 0, len(poddisruptionbudgets.Items))
+	for i := range poddisruptionbudgets.Items {
+		results = append(results, &podDisruptionBudget{&poddisruptionbudgets.Items[i]})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/prometheuses.go b/internal/pkg/scalable/prometheuses.go
index dc020bd..63430e5 100644
--- a/internal/pkg/scalable/prometheuses.go
+++ b/internal/pkg/scalable/prometheuses.go
@@ -10,13 +10,13 @@ import (
 
 // getPrometheuses is the getResourceFunc for Prometheuses
 func getPrometheuses(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	prometheuses, err := clientsets.Monitoring.MonitoringV1().Prometheuses(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get prometheuses: %w", err)
 	}
-	for _, item := range prometheuses.Items {
-		results = append(results, &replicaScaledWorkload{&prometheus{item}})
+	results := make([]Workload, 0, len(prometheuses.Items))
+	for i := range prometheuses.Items {
+		results = append(results, &replicaScaledWorkload{&prometheus{prometheuses.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/rollouts.go b/internal/pkg/scalable/rollouts.go
index f22ea2d..4cb797d 100644
--- a/internal/pkg/scalable/rollouts.go
+++ b/internal/pkg/scalable/rollouts.go
@@ -10,13 +10,13 @@ import (
 
 // getRollouts is the getResourceFunc for Argo Rollouts
 func getRollouts(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	rollouts, err := clientsets.Argo.ArgoprojV1alpha1().Rollouts(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get rollouts: %w", err)
 	}
-	for _, item := range rollouts.Items {
-		results = append(results, &replicaScaledWorkload{&rollout{&item}})
+	results := make([]Workload, 0, len(rollouts.Items))
+	for i := range rollouts.Items {
+		results = append(results, &replicaScaledWorkload{&rollout{&rollouts.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/scaledobjects.go b/internal/pkg/scalable/scaledobjects.go
index ab1bcda..7b44151 100644
--- a/internal/pkg/scalable/scaledobjects.go
+++ b/internal/pkg/scalable/scaledobjects.go
@@ -17,13 +17,13 @@ const (
 
 // getScaledObjects is the getResourceFunc for Keda ScaledObjects
 func getScaledObjects(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	scaledobjects, err := clientsets.Keda.KedaV1alpha1().ScaledObjects(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get scaledobjects: %w", err)
 	}
-	for _, item := range scaledobjects.Items {
-		results = append(results, &replicaScaledWorkload{&scaledObject{&item}})
+	results := make([]Workload, 0, len(scaledobjects.Items))
+	for i := range scaledobjects.Items {
+		results = append(results, &replicaScaledWorkload{&scaledObject{&scaledobjects.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/stacks.go b/internal/pkg/scalable/stacks.go
index b446a92..27b7919 100644
--- a/internal/pkg/scalable/stacks.go
+++ b/internal/pkg/scalable/stacks.go
@@ -10,13 +10,13 @@ import (
 
 // getStacks is the getResourceFunc for Zalando Stacks
 func getStacks(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	stacks, err := clientsets.Zalando.ZalandoV1().Stacks(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get stacks: %w", err)
 	}
-	for _, item := range stacks.Items {
-		results = append(results, &replicaScaledWorkload{&stack{&item}})
+	results := make([]Workload, 0, len(stacks.Items))
+	for i := range stacks.Items {
+		results = append(results, &replicaScaledWorkload{&stack{&stacks.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/scalable/statefulsets.go b/internal/pkg/scalable/statefulsets.go
index ee84d91..b1e859d 100644
--- a/internal/pkg/scalable/statefulsets.go
+++ b/internal/pkg/scalable/statefulsets.go
@@ -10,13 +10,13 @@ import (
 
 // getStatefulSets is the getResourceFunc for StatefulSets
 func getStatefulSets(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) {
-	var results []Workload
 	statefulsets, err := clientsets.Kubernetes.AppsV1().StatefulSets(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get statefulsets: %w", err)
 	}
-	for _, item := range statefulsets.Items {
-		results = append(results, &replicaScaledWorkload{&statefulSet{&item}})
+	results := make([]Workload, 0, len(statefulsets.Items))
+	for i := range statefulsets.Items {
+		results = append(results, &replicaScaledWorkload{&statefulSet{&statefulsets.Items[i]}})
 	}
 	return results, nil
 }
diff --git a/internal/pkg/values/regexList.go b/internal/pkg/values/regexList.go
index 7efbc9b..55e724c 100644
--- a/internal/pkg/values/regexList.go
+++ b/internal/pkg/values/regexList.go
@@ -10,6 +10,7 @@ type RegexList []*regexp.Regexp
 
 func (r *RegexList) Set(text string) error {
 	entries := strings.Split(text, ",")
+	*r = make(RegexList, 0, len(entries))
 	for _, entry := range entries {
 		entry = strings.TrimSpace(entry)
 		re, err := regexp.Compile(entry)
diff --git a/internal/pkg/values/stringlistValue.go b/internal/pkg/values/stringlistValue.go
index 56df244..d463641 100644
--- a/internal/pkg/values/stringlistValue.go
+++ b/internal/pkg/values/stringlistValue.go
@@ -10,11 +10,10 @@ type StringListValue []string
 
 func (s *StringListValue) Set(text string) error {
 	entries := strings.Split(text, ",")
-	var trimmedEntries []string
+	*s = make(StringListValue, 0, len(entries))
 	for _, entry := range entries {
-		trimmedEntries = append(trimmedEntries, strings.TrimSpace(entry))
+		*s = append(*s, strings.TrimSpace(entry))
 	}
-	*s = trimmedEntries
 	return nil
 }
diff --git a/internal/pkg/values/timespan.go b/internal/pkg/values/timespan.go
index e574704..187ec82 100644
--- a/internal/pkg/values/timespan.go
+++ b/internal/pkg/values/timespan.go
@@ -41,7 +41,7 @@ func (t *timeSpans) inTimeSpans() bool {
 
 func (t *timeSpans) Set(value string) error {
 	spans := strings.Split(value, ",")
-	var timespans []TimeSpan
+	timespans := make([]TimeSpan, 0, len(spans))
 	for _, timespanText := range spans {
 		timespanText = strings.TrimSpace(timespanText)

From b860182f83e3f9d3afe860aac8cdcb69aca31c98 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 16 Jan 2025 06:50:43 +0100
Subject: [PATCH 16/58] chore(deps): bump k8s.io/client-go from 0.32.0 to 0.32.1 (#96)

Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.32.0 to 0.32.1.
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.32.0...v0.32.1)

---
updated-dependencies:
- dependency-name: k8s.io/client-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 358dfc9..ad5c5ab 100644 --- a/go.mod +++ b/go.mod @@ -9,9 +9,9 @@ require ( github.com/prometheus-operator/prometheus-operator/pkg/client v0.79.2 github.com/stretchr/testify v1.10.0 github.com/zalando-incubator/stackset-controller v1.4.92 - k8s.io/api v0.32.0 - k8s.io/apimachinery v0.32.0 - k8s.io/client-go v0.32.0 + k8s.io/api v0.32.1 + k8s.io/apimachinery v0.32.1 + k8s.io/client-go v0.32.1 ) require ( diff --git a/go.sum b/go.sum index 5dc30ec..8a66283 100644 --- a/go.sum +++ b/go.sum @@ -421,16 +421,16 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.28.7/go.mod h1:y4RbcjCCMff1930SG/TcP3AUKNfaJUgIeUp58e/2vyY= -k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= -k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= k8s.io/apimachinery v0.28.7/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= -k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= -k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= +k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/client-go v0.28.7/go.mod h1:xIoEaDewZ+EwWOo1/F1t0IOKMPe1rwBZhLu9Es6y0tE= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= k8s.io/code-generator v0.28.7/go.mod h1:IaYGMqYjgj0zE3L9mnHo7hIL9GkY08GvGyyracaIxTA= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= From 5d3c0e3acbfe885eb39172ddf5e1e70e24304266 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 06:44:06 +0100 Subject: [PATCH 17/58] chore(deps): bump golang from 1.23.4 to 1.23.5 (#98) Bumps golang from 1.23.4 to 1.23.5. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 4a00b23..1f11bae 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23.4 AS build +FROM golang:1.23.5 AS build WORKDIR /tmp/kubedownscaler From d599d930e2596b4808bf961e531ac0893f5b330a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 06:44:45 +0100 Subject: [PATCH 18/58] chore(deps): bump actions/setup-go from 5.2.0 to 5.3.0 (#99) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.2.0 to 5.3.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v5.2.0...v5.3.0) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/pre_commit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pre_commit.yaml b/.github/workflows/pre_commit.yaml index 8b278b7..e2911df 100644 --- a/.github/workflows/pre_commit.yaml +++ b/.github/workflows/pre_commit.yaml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Setup Go environment - uses: actions/setup-go@v5.2.0 + uses: actions/setup-go@v5.3.0 with: go-version-file: "go.mod" - name: Install dependencies From df1411f73eb1ebb1bedf3dffe20107a7eea5bef1 Mon Sep 17 00:00:00 2001 From: Jonathan Mayer Date: Tue, 21 Jan 2025 06:45:41 +0100 Subject: [PATCH 19/58] Refactor/enforce stricter go linters (#93) * Merge branch 'perf/make-slices-more-efficient' * refactor: everything to stop new linters from crying * Squashed commit of the following: commit ae781977f09d9445c9d02caa7e1942ffc542541e Author: Jonathan Mayer Date: Mon Jan 13 09:35:27 2025 +0100 perf: reduce memory allocations (#81) * perf: reduce memory allocations * perf: avoid preallocating the slice with zero values * perf: avoid temporary copies in get resource functions commit ad608b6896074b7ed44b562c2bd4c6eb0026fd7e Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon Jan 13 08:21:37 2025 +0100 chore(deps): bump github.com/zalando-incubator/stackset-controller from 1.4.84 to 1.4.92 (#94) * chore(deps): bump github.com/zalando-incubator/stackset-controller Bumps [github.com/zalando-incubator/stackset-controller](https://github.com/zalando-incubator/stackset-controller) from 1.4.84 to 1.4.92. - [Release notes](https://github.com/zalando-incubator/stackset-controller/releases) - [Commits](https://github.com/zalando-incubator/stackset-controller/compare/v1.4.84...v1.4.92) --- updated-dependencies: - dependency-name: github.com/zalando-incubator/stackset-controller dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * chore: bump deps dependabot didnt --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer commit 48d222f4ecc108be2ba8d211f69020cf3da15c63 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 07:47:01 2025 +0100 chore(deps): bump github.com/prometheus-operator/prometheus-operator/pkg/client (#87) Bumps [github.com/prometheus-operator/prometheus-operator/pkg/client](https://github.com/prometheus-operator/prometheus-operator) from 0.76.2 to 0.79.2. - [Release notes](https://github.com/prometheus-operator/prometheus-operator/releases) - [Changelog](https://github.com/prometheus-operator/prometheus-operator/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus-operator/prometheus-operator/compare/v0.76.2...v0.79.2) --- updated-dependencies: - dependency-name: github.com/prometheus-operator/prometheus-operator/pkg/client dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> commit e856e24159534ce8e4a41479f385523dd5e23081 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 07:38:58 2025 +0100 chore(deps): bump github.com/kedacore/keda/v2 from 2.15.1 to 2.16.1 (#90) * chore(deps): bump github.com/kedacore/keda/v2 from 2.15.1 to 2.16.1 Bumps [github.com/kedacore/keda/v2](https://github.com/kedacore/keda) from 2.15.1 to 2.16.1. - [Release notes](https://github.com/kedacore/keda/releases) - [Changelog](https://github.com/kedacore/keda/blob/main/CHANGELOG.md) - [Commits](https://github.com/kedacore/keda/compare/v2.15.1...v2.16.1) --- updated-dependencies: - dependency-name: github.com/kedacore/keda/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * chore: automatically push pre-commit changes * Merge remote-tracking branch 'origin/main' * Squashed commit of the following: commit 5cf9b47e93ccca874aabbbf5bec25e3fdebef280 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 07:19:20 2025 +0100 chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 (#89) * chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 Bumps [k8s.io/api](https://github.com/kubernetes/api) from 0.31.0 to 0.32.0. - [Commits](https://github.com/kubernetes/api/compare/v0.31.0...v0.32.0) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * fix: bump client-go to match with other dependencies --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jonathan-mayer commit 7c5dc0864e10ce8ea283948058e97b9cd47c7818 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Jan 10 07:03:48 2025 +0100 chore(deps): bump golang from 1.23.1 to 1.23.4 (#91) * chore(deps): bump golang from 1.23.1 to 1.23.4 Bumps golang from 1.23.1 to 1.23.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]

* chore: bump golang from 1.23.1 to 1.23.4

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: jonathan-mayer

commit efe5db1f0f9a8d5bcc5701526966f0dc58bb89ab
Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri Jan 10 06:51:57 2025 +0100

chore(deps): bump actions/setup-go from 5.0.2 to 5.2.0 (#85)

Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.2 to 5.2.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v5.0.2...v5.2.0)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

commit 952f8b296e4c332f3bb619e61d9c9b5bdf6ca5df
Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri Jan 10 06:51:28 2025 +0100

chore(deps): bump docker/build-push-action from 5 to 6 (#84)

Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v5...v6)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

commit 0f9015c01fe06079fe8d78fac9f5d11e88ccdf21
Author: Jonathan Mayer
Date: Thu Jan 9 14:39:09 2025 +0100

chore: add dependabot config (#83)

commit f0b9f9e38f6697d878ef721a57c00e96e1c7ad05
Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu Jan 9 14:33:04 2025 +0100

chore(deps): bump golang.org/x/net from 0.28.0 to 0.33.0 (#82)

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.28.0 to 0.33.0.
- [Commits](https://github.com/golang/net/compare/v0.28.0...v0.33.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* chore: resolve pr threads

---
 .golangci.yaml | 211 ++++++++++++-
 cmd/kubedownscaler/main.go | 284 +++++++++---------
 cmd/kubedownscaler/main_test.go | 15 +-
 internal/api/kubernetes/client.go | 171 ++++-------
 internal/api/kubernetes/resourceLogger.go | 5 +-
 internal/api/kubernetes/util.go | 7 +-
 internal/pkg/scalable/cronjobs.go | 14 +-
 internal/pkg/scalable/daemonsets.go | 17 +-
 internal/pkg/scalable/daemonsets_test.go | 33 +-
 internal/pkg/scalable/deployments.go | 17 +-
 internal/pkg/scalable/helpers_test.go | 8 +-
 .../pkg/scalable/horizontalpodautoscalers.go | 23 +-
 internal/pkg/scalable/jobs.go | 18 +-
 internal/pkg/scalable/poddisruptionbudgets.go | 46 ++-
 .../pkg/scalable/poddisruptionbudgets_test.go | 23 +-
 internal/pkg/scalable/prometheuses.go | 16 +-
 .../pkg/scalable/replicaScaledWorkloads.go | 14 +-
 .../scalable/replicaScaledWorkloads_test.go | 47 ++-
 internal/pkg/scalable/rollouts.go | 17 +-
 internal/pkg/scalable/scaledobjects.go | 20 +-
 internal/pkg/scalable/stacks.go | 17 +-
 internal/pkg/scalable/statefulsets.go | 17 +-
 .../pkg/scalable/suspendScaledWorkloads.go | 8 +-
 .../scalable/suspendScaledWorkloads_test.go | 30 +-
 internal/pkg/scalable/util.go | 66 ++--
 internal/pkg/scalable/util_test.go | 19 +-
 internal/pkg/scalable/workload.go | 51 ++--
 internal/pkg/util/config.go | 112 +++++++
 .../pkg/{values => util}/durationValue.go | 9 +-
 internal/pkg/util/env.go | 19 ++
 internal/pkg/{values => util}/int32Value.go | 3 +-
 internal/pkg/{values => util}/regexList.go | 7 +-
 internal/pkg/util/resourceLogger.go | 10 +
 .../pkg/{values => util}/stringlistValue.go | 6 +-
 internal/pkg/values/layer.go | 81 +++--
 internal/pkg/values/layerParser.go | 224 ++++++++++++++
 internal/pkg/values/layer_test.go | 9 +-
 internal/pkg/values/timespan.go | 51 +++-
 internal/pkg/values/timespan_test.go | 40 ++-
 internal/pkg/values/triStateBool.go | 15 +-
 internal/pkg/values/util.go | 159 ----------
 41 files changed, 1299 insertions(+), 660 deletions(-)
 create mode 100644
internal/pkg/util/config.go rename internal/pkg/{values => util}/durationValue.go (78%) create mode 100644 internal/pkg/util/env.go rename internal/pkg/{values => util}/int32Value.go (95%) rename internal/pkg/{values => util}/regexList.go (97%) create mode 100644 internal/pkg/util/resourceLogger.go rename internal/pkg/{values => util}/stringlistValue.go (78%) create mode 100644 internal/pkg/values/layerParser.go delete mode 100644 internal/pkg/values/util.go diff --git a/.golangci.yaml b/.golangci.yaml index bfee0bf..a6f6008 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,17 +1,212 @@ -issues: - exclude-files: - - ".*_test.go" +linters-settings: + wsl: + allow-cuddle-declarations: true + depguard: + rules: + logger: + deny: + - pkg: "github.com/sirupsen/logrus" + desc: use the standard library's slog.Logger instead + - pkg: "github.com/ueber-go/zap" + desc: use the standard library's slog.Logger instead + dupl: + threshold: 100 + funlen: + lines: -1 # the number of lines (code + empty lines) is not a right metric and leads to code without empty line or one-liner. + statements: 50 + goconst: + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + gocyclo: + min-complexity: 15 + gofmt: + rewrite-rules: + - pattern: "interface{}" + replacement: "any" + goimports: + local-prefixes: github.com/golangci/golangci-lint + mnd: + # don't include the "operation" and "assign" + checks: + - argument + - case + - condition + - return + ignored-numbers: + - "0" + - "1" + - "2" + - "3" + ignored-functions: + - strings.SplitN + + govet: + enable-all: true + disable: + - fieldalignment # disabled because it's too strict, it checks if struct fields are sorted by size + settings: + printf: + funcs: + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + lll: + line-length: 140 + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-explanation: true # require an explanation for nolint directives + require-specific: true # require nolint directives to be specific about which linter is being skipped + revive: + rules: + - name: unexported-return + disabled: true + - name: unused-parameter + linters: + disable-all: true enable: - - staticcheck + - asasalint + - asciicheck + - bidichk + - bodyclose + - canonicalheader + - containedctx + - contextcheck + - copyloopvar + - cyclop + - decorder + - depguard + - dogsled + - dupl + - dupword + - durationcheck + - err113 - errcheck - - govet - - ineffassign - - typecheck - - unused + - errchkjson + - errname + - errorlint + - exhaustive + #- exhaustruct # structs, esp. 
option structs are not always supposed to be exhausted on initialization + - fatcontext + - forbidigo + - forcetypeassert + - funlen + - gci + - ginkgolinter + - gocheckcompilerdirectives + - gochecknoglobals + - gochecknoinits + - gochecksumtype + - gocognit + - goconst - gocritic + - gocyclo + - godot + - godox + - gofmt - gofumpt + - goheader - goimports + - gomoddirectives + - gomodguard + - goprintffuncname - gosec + - gosimple + - gosmopolitan + - govet + - grouper + - importas + - inamedparam + - ineffassign + - interfacebloat + - intrange + - ireturn + - lll + - loggercheck + - maintidx + - makezero + - mirror + - misspell + - mnd + - musttag + - nakedret + - nestif + - nilerr + - nilnil + #- nlreturn # wsl has a better implementation of the same principle, where you only have to put whitespaces if the block is longer + - noctx + - nolintlint + - nonamedreturns + - nosprintfhostport + - paralleltest + - perfsprint + - prealloc + - predeclared + - promlinter + - protogetter + - reassign + - revive + - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck + - staticcheck + - stylecheck + - tagalign + - tagliatelle + - tenv + - testableexamples + - testifylint + #- testpackage # we might also want to test unexported functions + - thelper + - tparallel + - unconvert + - unparam + - unused + - usestdlibvars + - varnamelen + - wastedassign + - whitespace + - wrapcheck + - wsl + - zerologlint + output: print-linter-name: true + +issues: + max-same-issues: 50 + exclude-dirs: + - test/data # test files + - test/testdata # test files + - pb # protobuf files + exclude-rules: + - path: _test\.go + linters: + - mnd # test files can have magic numbers + - revive # test files can have unused parameters + - forcetypeassert # test files can have unchecked type assertion + - wrapcheck # test files can have unwrapped errors + - govet # test files can have global variables + - gochecknoglobals # test files can have global variables + - source: "//nolint: " + linters: [lll] # disable long line linter on lines with '//nolint:' directive + fix: true + +run: + timeout: 5m diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index ea36da8..10f83b7 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -6,179 +6,122 @@ import ( "fmt" "log/slog" "os" - "os/signal" "regexp" "sync" - "sync/atomic" - "syscall" "time" - _ "time/tzdata" "github.com/caas-team/gokubedownscaler/internal/api/kubernetes" "github.com/caas-team/gokubedownscaler/internal/pkg/scalable" + "github.com/caas-team/gokubedownscaler/internal/pkg/util" "github.com/caas-team/gokubedownscaler/internal/pkg/values" ) -var ( - layerCli = values.NewLayer() - layerEnv = values.NewLayer() - - // if the downscaler should take actions or just print them out - dryRun = false - // if debug information should be printed - debug = false - // if the scan should only run once - once = false - // how long to wait between scans - interval = 30 * time.Second - // list of namespaces to restrict the downscaler to - includeNamespaces []string - // list of resources to restrict the downscaler to - includeResources = []string{"deployments"} - // list of namespaces to ignore while downscaling - excludeNamespaces = values.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")} - // list of workload names to ignore while downscaling - excludeWorkloads values.RegexList - // workloads have to match one of these labels to be scaled - includeLabels values.RegexList - // annotation to use for grace-period 
instead of creation time - timeAnnotation string - // optional kubeconfig to use for testing purposes instead of the in-cluster config - kubeconfig string - // isLeader indicates if the current replica is the leader - isLeader atomic.Bool +const ( + // value defaults. + defaultGracePeriod = 15 * time.Minute + defaultDownscaleReplicas = 0 + + // runtime config defaults. + defaultInterval = 30 * time.Second ) -func init() { - // set defaults for layers - layerCli.GracePeriod = 15 * time.Minute - layerCli.DownscaleReplicas = 0 - - // cli layer values - flag.Var(&layerCli.DownscalePeriod, "downscale-period", "period to scale down in (default: never, incompatible: UpscaleTime, DownscaleTime)") - flag.Var(&layerCli.DownTime, "default-downtime", "timespans where workloads will be scaled down, outside of them they will be scaled up (default: never, incompatible: UpscalePeriod, DownscalePeriod)") - flag.Var(&layerCli.UpscalePeriod, "upscale-period", "periods to scale up in (default: never, incompatible: UpscaleTime, DownscaleTime)") - flag.Var(&layerCli.UpTime, "default-uptime", "timespans where workloads will be scaled up, outside of them they will be scaled down (default: never, incompatible: UpscalePeriod, DownscalePeriod)") - flag.Var(&layerCli.Exclude, "explicit-include", "sets exclude on cli layer to true, makes it so namespaces or deployments have to specify downscaler/exclude=false (default: false)") - flag.Var((*values.Int32Value)(&layerCli.DownscaleReplicas), "downtime-replicas", "the replicas to scale down to (default: 0)") - flag.Var((*values.DurationValue)(&layerCli.GracePeriod), "grace-period", "the grace period between creation of workload until first downscale (default: 15min)") - - // cli runtime configuration - flag.BoolVar(&dryRun, "dry-run", false, "print actions instead of doing them. 
enables debug logs (default: false)") - flag.BoolVar(&debug, "debug", false, "print more debug information (default: false)") - flag.BoolVar(&once, "once", false, "run scan only once (default: false)") - flag.Var((*values.DurationValue)(&interval), "interval", "time between scans (default: 30s)") - flag.Var((*values.StringListValue)(&includeNamespaces), "namespace", "restrict the downscaler to the specified namespaces (default: all)") - flag.Var((*values.StringListValue)(&includeResources), "include-resources", "restricts the downscaler to the specified resource types (default: deployments)") - flag.Var(&excludeNamespaces, "exclude-namespaces", "exclude namespaces from being scaled (default: kube-system,kube-downscaler)") - flag.Var(&excludeWorkloads, "exclude-deployments", "exclude deployments from being scaled (optional)") - flag.Var(&includeLabels, "matching-labels", "restricts the downscaler to workloads with these labels (default: all)") - flag.StringVar(&kubeconfig, "k", "", "kubeconfig to use instead of the in-cluster config (optional)") - flag.StringVar(&timeAnnotation, "deployment-time-annotation", "", "the annotation to use instead of creation time for grace period (optional)") - - // env runtime configuration - err := values.GetEnvValue("EXCLUDE_NAMESPACES", &excludeNamespaces) - if err != nil { - slog.Error("error while getting EXCLUDE_NAMESPACES environment variable", "error", err) - } - err = values.GetEnvValue("EXCLUDE_DEPLOYMENTS", &excludeWorkloads) - if err != nil { - slog.Error("error while getting EXCLUDE_DEPLOYMENTS environment variable", "error", err) +func main() { + // set defaults for runtime configuration + config := &util.RuntimeConfiguration{ + DryRun: false, + Debug: false, + Once: false, + Interval: defaultInterval, + IncludeNamespaces: nil, + IncludeResources: []string{"deployments"}, + ExcludeNamespaces: util.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")}, + ExcludeWorkloads: nil, + IncludeLabels: nil, + TimeAnnotation: "", + Kubeconfig: "", } - layerEnv, err = values.GetLayerFromEnv() + layerCli := values.NewLayer() + layerEnv := values.NewLayer() + + // set defaults for layers + layerCli.GracePeriod = defaultGracePeriod + layerCli.DownscaleReplicas = defaultDownscaleReplicas + + config.ParseConfigFlags() + + layerCli.ParseLayerFlags() + + flag.Parse() + + err := layerEnv.GetLayerFromEnv() if err != nil { slog.Error("failed to get layer from env", "error", err) + os.Exit(1) } -} -func main() { - flag.Parse() - if debug || dryRun { + if config.Debug || config.DryRun { slog.SetLogLoggerLevel(slog.LevelDebug) } - if err := layerCli.CheckForIncompatibleFields(); err != nil { + + if err = layerCli.CheckForIncompatibleFields(); err != nil { slog.Error("found incompatible fields", "error", err) os.Exit(1) } - downscalerNamespace, err := kubernetes.GetCurrentNamespaceFromFile() - if err != nil { - slog.Error("failed to get downscaler namespace", "error", err) - os.Exit(1) - } + ctx := context.Background() slog.Debug("getting client for kubernetes") - client, err := kubernetes.NewClient(kubeconfig, dryRun) + + client, err := kubernetes.NewClient(config.Kubeconfig, config.DryRun) if err != nil { slog.Error("failed to create new Kubernetes client", "error", err) os.Exit(1) } - // leader election and graceful termination - go func() { - // create a context to handle termination gracefully - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // listen for termination signals in a separate goroutine - sigs := make(chan 
os.Signal, 1) - signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) - - // Goroutine for leader election - go func() { - err := client.CreateOrUpdateLease(ctx, downscalerNamespace, &isLeader) - if err != nil { - slog.Error("failed to acquire lease", "error", err) - os.Exit(1) - } - }() - - // pause and wait for termination signal - <-sigs - if isLeader.Load() { - slog.Debug("received termination signal, deleting lease") - - // delete the lease after termination signal is intercepted - err := client.DeleteLease(ctx, downscalerNamespace, &isLeader) - if err != nil { - slog.Error("failed to delete lease", "error", err) - } else { - slog.Debug("lease deleted successfully") - } - } + slog.Info("started downscaler") - // cancel the context to stop the leader election goroutine and exit the main process - cancel() + err = scanWorkloads(client, ctx, &layerCli, &layerEnv, config) + if err != nil { + slog.Error("failed to scan over workloads", + "error", err, + "config", config, + "CliLayer", layerCli, + "EnvLayer", layerEnv, + ) os.Exit(1) - }() + } +} - slog.Info("started downscaler") +// scanWorkloads scans over all workloads every scan. +func scanWorkloads( + client kubernetes.Client, + ctx context.Context, + layerCli, layerEnv *values.Layer, + config *util.RuntimeConfiguration, +) error { for { - if !isLeader.Load() { - slog.Debug("not the leader, skipping workload scanning") - time.Sleep(5 * time.Second) // Sync sleep with lease duration - continue - } slog.Info("scanning workloads") - workloads, err := client.GetWorkloads(includeNamespaces, includeResources, ctx) + workloads, err := client.GetWorkloads(config.IncludeNamespaces, config.IncludeResources, ctx) if err != nil { - slog.Error("failed to get workloads", "error", err) - os.Exit(1) + return fmt.Errorf("failed to get workloads: %w", err) } - workloads = scalable.FilterExcluded(workloads, includeLabels, excludeNamespaces, excludeWorkloads) + + workloads = scalable.FilterExcluded(workloads, config.IncludeLabels, config.ExcludeNamespaces, config.ExcludeWorkloads) slog.Info("scanning over workloads matching filters", "amount", len(workloads)) - var wg sync.WaitGroup + var waitGroup sync.WaitGroup for _, workload := range workloads { - wg.Add(1) + waitGroup.Add(1) + go func() { slog.Debug("scanning workload", "workload", workload.GetName(), "namespace", workload.GetNamespace()) - defer wg.Done() - err := scanWorkload(workload, client, ctx, layerCli, layerEnv) + defer waitGroup.Done() + + err := scanWorkload(workload, client, ctx, layerCli, layerEnv, config) if err != nil { slog.Error("failed to scan workload", "error", err, "workload", workload.GetName(), "namespace", workload.GetNamespace()) return @@ -187,45 +130,81 @@ func main() { slog.Debug("successfully scanned workload", "workload", workload.GetName(), "namespace", workload.GetNamespace()) }() } - wg.Wait() + + waitGroup.Wait() slog.Info("successfully scanned all workloads") - if once { + if config.Once { slog.Debug("once is set to true, exiting") break } - slog.Debug("waiting until next scan", "interval", interval.String()) - time.Sleep(time.Duration(interval)) + + slog.Debug("waiting until next scan", "interval", config.Interval.String()) + time.Sleep(config.Interval) } + + return nil } -// scanWorkload runs a scan on the worklod, determining the scaling and scaling the workload -func scanWorkload(workload scalable.Workload, client kubernetes.Client, ctx context.Context, layerCli, layerEnv values.Layer) error { +// scanWorkload runs a scan on the worklod, determining the scaling 
and scaling the workload. +func scanWorkload( + workload scalable.Workload, + client kubernetes.Client, + ctx context.Context, + layerCli, layerEnv *values.Layer, + config *util.RuntimeConfiguration, +) error { resourceLogger := kubernetes.NewResourceLogger(client, workload) namespaceAnnotations, err := client.GetNamespaceAnnotations(workload.GetNamespace(), ctx) if err != nil { return fmt.Errorf("failed to get namespace annotations: %w", err) } - layerWorkload, err := values.GetLayerFromAnnotations(workload.GetAnnotations(), resourceLogger, ctx) - if err != nil { + + slog.Debug( + "parsing workload layer from annotations", + "annotations", workload.GetAnnotations(), + "name", workload.GetName(), + "namespace", workload.GetNamespace(), + ) + + layerWorkload := values.NewLayer() + if err = layerWorkload.GetLayerFromAnnotations(workload.GetAnnotations(), resourceLogger, ctx); err != nil { return fmt.Errorf("failed to parse workload layer from annotations: %w", err) } - layerNamespace, err := values.GetLayerFromAnnotations(namespaceAnnotations, resourceLogger, ctx) - if err != nil { + + slog.Debug( + "parsing namespace layer from annotations", + "annotations", namespaceAnnotations, + "name", workload.GetName(), + "namespace", workload.GetNamespace(), + ) + + layerNamespace := values.NewLayer() + if err = layerNamespace.GetLayerFromAnnotations(namespaceAnnotations, resourceLogger, ctx); err != nil { return fmt.Errorf("failed to parse namespace layer from annotations: %w", err) } - layers := values.Layers{layerWorkload, layerNamespace, layerCli, layerEnv} + layers := values.Layers{&layerWorkload, &layerNamespace, layerCli, layerEnv} + + slog.Debug("finished parsing all layers", "layers", layers, "workload", workload.GetName(), "namespace", workload.GetNamespace()) - ok, err := layers.IsInGracePeriod(timeAnnotation, workload.GetAnnotations(), workload.GetCreationTimestamp().Time, resourceLogger, ctx) + isInGracePeriod, err := layers.IsInGracePeriod( + config.TimeAnnotation, + workload.GetAnnotations(), + workload.GetCreationTimestamp().Time, + resourceLogger, + ctx, + ) if err != nil { return fmt.Errorf("failed to get if workload is on grace period: %w", err) } - if ok { + + if isInGracePeriod { slog.Debug("workload is on grace period, skipping", "workload", workload.GetName(), "namespace", workload.GetNamespace()) return nil } + if layers.GetExcluded() { slog.Debug("workload is excluded, skipping", "workload", workload.GetName(), "namespace", workload.GetNamespace()) return nil @@ -236,27 +215,50 @@ func scanWorkload(workload scalable.Workload, client kubernetes.Client, ctx cont slog.Debug("scaling is not set by any layer, skipping", "workload", workload.GetName(), "namespace", workload.GetNamespace()) return nil } + + err = scaleWorkload(scaling, workload, layers, client, ctx) + if err != nil { + return fmt.Errorf("failed to scale workload: %w", err) + } + + return nil +} + +// scaleWorkload scales the given workload according to the given wanted scaling state. 
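Note: the layers slice built above is ordered from most to least specific — workload annotations, then namespace annotations, then CLI flags, then environment. A minimal, self-contained sketch of how such a precedence lookup resolves a value; the layer type here is a simplified stand-in for values.Layer, not the real implementation:

    package main

    import "fmt"

    // layer is a simplified stand-in for values.Layer, modeling one field.
    type layer struct {
        downscaleReplicas *int32 // nil means "not set on this layer"
    }

    // resolveReplicas walks the layers from most to least specific and
    // returns the first value that is explicitly set.
    func resolveReplicas(layers []layer) (int32, bool) {
        for _, l := range layers {
            if l.downscaleReplicas != nil {
                return *l.downscaleReplicas, true
            }
        }
        return 0, false
    }

    func main() {
        namespaceValue := int32(2)
        cliDefault := int32(0)
        // workload layer unset, namespace layer sets 2, CLI default is 0;
        // the more specific namespace layer wins.
        layers := []layer{{nil}, {&namespaceValue}, {&cliDefault}}
        if replicas, ok := resolveReplicas(layers); ok {
            fmt.Println("resolved downscale replicas:", replicas)
        }
    }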
+func scaleWorkload( + scaling values.Scaling, + workload scalable.Workload, + layers values.Layers, + client kubernetes.Client, + ctx context.Context, +) error { if scaling == values.ScalingIgnore { slog.Debug("scaling is ignored, skipping", "workload", workload.GetName(), "namespace", workload.GetNamespace()) return nil } + if scaling == values.ScalingDown { slog.Debug("downscaling workload", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + downscaleReplicas, err := layers.GetDownscaleReplicas() if err != nil { return fmt.Errorf("failed to get downscale replicas: %w", err) } + err = client.DownscaleWorkload(downscaleReplicas, workload, ctx) if err != nil { return fmt.Errorf("failed to downscale workload: %w", err) } } + if scaling == values.ScalingUp { slog.Debug("upscaling workload", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + err := client.UpscaleWorkload(workload, ctx) if err != nil { return fmt.Errorf("failed to upscale workload: %w", err) } } + return nil } diff --git a/cmd/kubedownscaler/main_test.go b/cmd/kubedownscaler/main_test.go index 930ebb9..f3fb602 100644 --- a/cmd/kubedownscaler/main_test.go +++ b/cmd/kubedownscaler/main_test.go @@ -2,14 +2,16 @@ package main import ( "context" + "log/slog" "testing" "time" client "github.com/caas-team/gokubedownscaler/internal/api/kubernetes" "github.com/caas-team/gokubedownscaler/internal/pkg/scalable" + "github.com/caas-team/gokubedownscaler/internal/pkg/util" "github.com/caas-team/gokubedownscaler/internal/pkg/values" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -59,10 +61,15 @@ func (m *MockWorkload) GetCreationTimestamp() v1.Time { } func TestScanWorkload(t *testing.T) { + t.Parallel() + + slog.SetLogLoggerLevel(slog.LevelDebug) + ctx := context.TODO() layerCli := values.NewLayer() layerEnv := values.NewLayer() + config := &util.RuntimeConfiguration{} layerCli.DownscaleReplicas = 0 layerCli.GracePeriod = 15 * time.Minute @@ -72,7 +79,7 @@ func TestScanWorkload(t *testing.T) { mockWorkload.On("GetNamespace").Return("test-namespace") mockWorkload.On("GetName").Return("test-workload") - mockWorkload.On("GetCreationTimestamp").Return(time.Now().Add(-time.Duration(layerCli.GracePeriod))) + mockWorkload.On("GetCreationTimestamp").Return(time.Now().Add(-layerCli.GracePeriod)) mockWorkload.On("GetAnnotations").Return(map[string]string{ "downscaler/force-downtime": "true", }) @@ -80,9 +87,9 @@ func TestScanWorkload(t *testing.T) { mockClient.On("GetNamespaceAnnotations", "test-namespace", ctx).Return(map[string]string{}, nil) mockClient.On("DownscaleWorkload", int32(0), mockWorkload, ctx).Return(nil) - err := scanWorkload(mockWorkload, mockClient, ctx, layerCli, layerEnv) + err := scanWorkload(mockWorkload, mockClient, ctx, &layerCli, &layerEnv, config) - assert.NoError(t, err) + require.NoError(t, err) mockClient.AssertExpectations(t) mockWorkload.AssertExpectations(t) diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index c24a0e9..b19b5d1 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -3,12 +3,9 @@ package kubernetes import ( "context" "crypto/sha256" - "errors" "fmt" "log/slog" - "os" "strings" - "sync/atomic" "time" argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" @@ -16,22 +13,17 @@ import ( keda "github.com/kedacore/keda/v2/pkg/generated/clientset/versioned" monitoring 
"github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" zalando "github.com/zalando-incubator/stackset-controller/pkg/clientset" - coordv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) const ( - componentName = "kubedownscaler" - leaseName = "downscaler-lease" - leaseDuration = 30 * time.Second - leaseCheckSleepDuration = leaseDuration / 2 + componentName = "kubedownscaler" + timeout = 30 * time.Second ) -var errResourceNotSupported = errors.New("error: specified rescource type is not supported") - -// Client is an interface representing a high-level client to get and modify Kubernetes resources +// Client is an interface representing a high-level client to get and modify Kubernetes resources. type Client interface { // GetNamespaceAnnotations gets the annotations of the workload's namespace GetNamespaceAnnotations(namespace string, ctx context.Context) (map[string]string, error) @@ -43,13 +35,9 @@ type Client interface { UpscaleWorkload(workload scalable.Workload, ctx context.Context) error // addWorkloadEvent creates a new event on the workload addWorkloadEvent(eventType string, reason string, id string, message string, workload scalable.Workload, ctx context.Context) error - // CreateOrUpdateLease creates or update the downscaler lease - CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error - // DeleteLease deletes the downscaler lease - DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error } -// NewClient makes a new Client +// NewClient makes a new Client. func NewClient(kubeconfig string, dryRun bool) (client, error) { var kubeclient client var clientsets scalable.Clientsets @@ -60,65 +48,77 @@ func NewClient(kubeconfig string, dryRun bool) (client, error) { if err != nil { return kubeclient, fmt.Errorf("failed to get config for Kubernetes: %w", err) } + // set qps and burst rate limiting options. See https://kubernetes.io/docs/reference/config-api/apiserver-eventratelimit.v1alpha1/ config.QPS = 500 // available queries per second, when unused will fill the burst buffer config.Burst = 1000 // the max size of the buffer of queries + clientsets.Kubernetes, err = kubernetes.NewForConfig(config) if err != nil { return kubeclient, fmt.Errorf("failed to get clientset for Kubernetes resources: %w", err) } + clientsets.Keda, err = keda.NewForConfig(config) if err != nil { return kubeclient, fmt.Errorf("failed to get clientset for keda resources: %w", err) } + clientsets.Argo, err = argo.NewForConfig(config) if err != nil { return kubeclient, fmt.Errorf("failed to get clientset for argo resources: %w", err) } + clientsets.Zalando, err = zalando.NewForConfig(config) if err != nil { return kubeclient, fmt.Errorf("failed to get clientset for zalando resources: %w", err) } + clientsets.Monitoring, err = monitoring.NewForConfig(config) if err != nil { return kubeclient, fmt.Errorf("failed to get clientset for monitoring resources: %w", err) } + kubeclient.clientsets = &clientsets + return kubeclient, nil } -// client is a Kubernetes client with downscaling specific functions +// client is a Kubernetes client with downscaling specific functions. type client struct { clientsets *scalable.Clientsets dryRun bool } -// GetNamespaceAnnotations gets the annotations of the workload's namespace +// GetNamespaceAnnotations gets the annotations of the workload's namespace. 
func (c client) GetNamespaceAnnotations(namespace string, ctx context.Context) (map[string]string, error) { ns, err := c.clientsets.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get namespace: %w", err) } + return ns.Annotations, nil } -// GetWorkloads gets all workloads of the specified resources for the specified namespaces -func (c client) GetWorkloads(namespaces []string, resourceTypes []string, ctx context.Context) ([]scalable.Workload, error) { +// GetWorkloads gets all workloads of the specified resources for the specified namespaces. +func (c client) GetWorkloads(namespaces, resourceTypes []string, ctx context.Context) ([]scalable.Workload, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + var results []scalable.Workload + if namespaces == nil { namespaces = []string{""} } + for _, namespace := range namespaces { for _, resourceType := range resourceTypes { slog.Debug("getting workloads from resource type", "resourceType", resourceType) - getWorkloads, ok := scalable.GetWorkloads[strings.ToLower(resourceType)] - if !ok { - return nil, errResourceNotSupported - } - workloads, err := getWorkloads(namespace, c.clientsets, ctx) + + workloads, err := scalable.GetWorkloads(strings.ToLower(resourceType), namespace, c.clientsets, ctx) if err != nil { return nil, fmt.Errorf("failed to get workloads: %w", err) } + results = append(results, workloads...) } } @@ -126,69 +126,90 @@ func (c client) GetWorkloads(namespaces []string, resourceTypes []string, ctx co return results, nil } -// DownscaleWorkload downscales the workload to the specified replicas +// DownscaleWorkload downscales the workload to the specified replicas. func (c client) DownscaleWorkload(replicas int32, workload scalable.Workload, ctx context.Context) error { err := workload.ScaleDown(replicas) if err != nil { return fmt.Errorf("failed to set the workload into a scaled down state: %w", err) } + if c.dryRun { - slog.Info("running in dry run mode, would have sent update workload request to scale down workload", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + slog.Info( + "running in dry run mode, would have sent update workload request to scale down workload", + "workload", workload.GetName(), + "namespace", workload.GetNamespace(), + ) + return nil } + err = workload.Update(c.clientsets, ctx) if err != nil { return fmt.Errorf("failed to update the workload: %w", err) } + slog.Debug("successfully scaled down workload", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + return nil } -// UpscaleWorkload upscales the workload to the original replicas +// UpscaleWorkload upscales the workload to the original replicas. 
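Note: two patterns from the client changes above, shown in isolation. The QPS/Burst fields raise client-go's client-side rate limits (one scan fans out into List calls per namespace and resource type), and GetWorkloads now bounds a whole scan with one context deadline instead of passing TimeoutSeconds to every List request. A compilable sketch, assuming an in-cluster config:

    package kubeclient

    import (
        "context"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    // newClientset raises client-go's client-side rate limits so a scan
    // over many namespaces and resource types is not throttled locally.
    func newClientset() (*kubernetes.Clientset, error) {
        config, err := rest.InClusterConfig()
        if err != nil {
            return nil, err
        }

        config.QPS = 500    // sustained queries per second
        config.Burst = 1000 // short-term burst budget

        return kubernetes.NewForConfig(config)
    }

    // listDeployments bounds all calls of one scan with a single deadline,
    // replacing the per-request TimeoutSeconds the getters used before.
    func listDeployments(parent context.Context, clientset *kubernetes.Clientset) error {
        ctx, cancel := context.WithTimeout(parent, 30*time.Second)
        defer cancel() // always release the timer, even on early return

        _, err := clientset.AppsV1().Deployments("").List(ctx, metav1.ListOptions{})
        return err
    }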
func (c client) UpscaleWorkload(workload scalable.Workload, ctx context.Context) error { err := workload.ScaleUp() if err != nil { return fmt.Errorf("failed to set the workload into a scaled up state: %w", err) } + if c.dryRun { - slog.Info("running in dry run mode, would have sent update workload request to scale up workload", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + slog.Info( + "running in dry run mode, would have sent update workload request to scale up workload", + "workload", workload.GetName(), + "namespace", workload.GetNamespace(), + ) + return nil } + err = workload.Update(c.clientsets, ctx) if err != nil { return fmt.Errorf("failed to update the workload: %w", err) } + slog.Debug("successfully scaled up workload", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + return nil } -// addWorkloadEvent creates or updates a new event on the workload -func (c client) addWorkloadEvent(eventType, reason, id, message string, workload scalable.Workload, ctx context.Context) error { +// addWorkloadEvent creates or updates a new event on the workload. +func (c client) addWorkloadEvent(eventType, reason, identifier, message string, workload scalable.Workload, ctx context.Context) error { if c.dryRun { slog.Info("running in dry run mode, would have added an event on workload", "workload", workload.GetName(), "namespace", workload.GetNamespace(), "eventType", eventType, "reason", reason, - "id", id, + "id", identifier, "message", message, ) + return nil } - hash := sha256.Sum256([]byte(fmt.Sprintf("%s.%s", id, message))) + hash := sha256.Sum256([]byte(fmt.Sprintf("%s.%s", identifier, message))) name := fmt.Sprintf("%s.%s.%x", workload.GetName(), reason, hash) eventsClient := c.clientsets.Kubernetes.CoreV1().Events(workload.GetNamespace()) // check if event already exists if event, err := eventsClient.Get(ctx, name, metav1.GetOptions{}); err == nil && event != nil { // update event - event.Count += 1 + event.Count++ event.LastTimestamp = metav1.Now() + _, err := eventsClient.Update(ctx, event, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update event: %w", err) } + return nil } @@ -216,88 +237,6 @@ func (c client) addWorkloadEvent(eventType, reason, id, message string, workload if err != nil { return fmt.Errorf("failed to create event: %w", err) } - return nil -} - -// CreateOrUpdateLease attempts to acquire and maintain a lease for leadership. 
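Note: the hand-rolled lease loop removed below created, polled, and renewed the Lease object itself. If lease-based leader election is reintroduced later, client-go ships a maintained implementation; a sketch of what that could look like, with the lease name and durations carried over from the removed constants as assumptions:

    package election

    import (
        "context"
        "os"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/leaderelection"
        "k8s.io/client-go/tools/leaderelection/resourcelock"
    )

    // runWithLeaderElection blocks, running scan only while this replica
    // holds the "downscaler-lease" Lease in the given namespace.
    func runWithLeaderElection(ctx context.Context, clientset *kubernetes.Clientset, namespace string, scan func(context.Context)) {
        identity, _ := os.Hostname()

        lock := &resourcelock.LeaseLock{
            LeaseMeta:  metav1.ObjectMeta{Name: "downscaler-lease", Namespace: namespace},
            Client:     clientset.CoordinationV1(),
            LockConfig: resourcelock.ResourceLockConfig{Identity: identity},
        }

        leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
            Lock:            lock,
            ReleaseOnCancel: true, // give up the lease on graceful shutdown
            LeaseDuration:   30 * time.Second,
            RenewDeadline:   20 * time.Second,
            RetryPeriod:     5 * time.Second,
            Callbacks: leaderelection.LeaderCallbacks{
                OnStartedLeading: scan,
                OnStoppedLeading: func() { /* lost the lease: stop scanning */ },
            },
        })
    }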
-func (c client) CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { - // get hostname for holder identity - holderIdentity, err := os.Hostname() - if err != nil { - slog.Error("failed to get hostname", "error", err) - return err - } - - leasesClient := c.clientsets.Kubernetes.CoordinationV1().Leases(leaseNamespace) - leaseDurationSeconds := int32(leaseDuration.Seconds()) - - for { - // lease Object - lease := &coordv1.Lease{ - ObjectMeta: metav1.ObjectMeta{ - Name: leaseName, - Namespace: leaseNamespace, - }, - Spec: coordv1.LeaseSpec{ - HolderIdentity: &holderIdentity, - LeaseDurationSeconds: &leaseDurationSeconds, - RenewTime: &metav1.MicroTime{Time: time.Now()}, - }, - } - - // search for an existing lease inside the namespace - existingLease, err := leasesClient.Get(ctx, leaseName, metav1.GetOptions{}) - if err != nil { - // creates new lease if lease doesn't exist, and jump to the next iteration - slog.Debug("creating new lease", "lease", leaseName, "namespace", leaseNamespace) - _, err = leasesClient.Create(ctx, lease, metav1.CreateOptions{}) - if err != nil { - slog.Error("failed to create lease", "error", err) - time.Sleep(leaseCheckSleepDuration) - continue - } - slog.Debug("acquired lease", "holder", holderIdentity, "namespace", leaseNamespace) - isLeader.Store(true) - } else { - // check if the existing lease has expired or is held by another pod; if it is held by another pod jump to the next iteration - if existingLease.Spec.RenewTime != nil && - time.Since(existingLease.Spec.RenewTime.Time) < leaseDuration { - if *existingLease.Spec.HolderIdentity != holderIdentity { - slog.Debug("lease already held by another", "holder", *existingLease.Spec.HolderIdentity) - isLeader.Store(false) - time.Sleep(leaseCheckSleepDuration) - continue - } - } - - // update the lease if it is currently held by the current pod - existingLease.Spec.HolderIdentity = &holderIdentity - existingLease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()} - _, err = leasesClient.Update(ctx, existingLease, metav1.UpdateOptions{}) - if err != nil { - slog.Error("failed to update lease", "error", err) - time.Sleep(leaseCheckSleepDuration) - continue - } - slog.Debug("lease renewed", "holder", holderIdentity, "namespace", leaseNamespace) - isLeader.Store(true) - } - - // sleep before renewing - time.Sleep(leaseCheckSleepDuration) - } -} - -func (c client) DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { - leasesClient := c.clientsets.Kubernetes.CoordinationV1().Leases(leaseNamespace) - - err := leasesClient.Delete(ctx, leaseName, metav1.DeleteOptions{}) - if err != nil { - slog.Error("failed to delete lease", "lease", leaseName, "namespace", leaseNamespace) - return err - } - isLeader.Store(false) - slog.Debug("deleted lease", "lease", leaseName, "namespace", leaseNamespace) return nil } diff --git a/internal/api/kubernetes/resourceLogger.go b/internal/api/kubernetes/resourceLogger.go index 6d1aa5c..a491a21 100644 --- a/internal/api/kubernetes/resourceLogger.go +++ b/internal/api/kubernetes/resourceLogger.go @@ -15,6 +15,7 @@ func NewResourceLogger(client Client, workload scalable.Workload) resourceLogger workload: workload, client: client, } + return logger } @@ -23,7 +24,7 @@ type resourceLogger struct { client Client } -// ErrorInvalidAnnotation adds an annotation error on the resource +// ErrorInvalidAnnotation adds an annotation error on the resource. 
func (r resourceLogger) ErrorInvalidAnnotation(annotation, message string, ctx context.Context) { err := r.client.addWorkloadEvent(v1.EventTypeWarning, reasonInvalidConfiguration, annotation, message, r.workload, ctx) if err != nil { @@ -32,7 +33,7 @@ func (r resourceLogger) ErrorInvalidAnnotation(annotation, message string, ctx c } } -// ErrorIncompatibleFields adds an incompatible fields error on the resource +// ErrorIncompatibleFields adds an incompatible fields error on the resource. func (r resourceLogger) ErrorIncompatibleFields(message string, ctx context.Context) { err := r.client.addWorkloadEvent(v1.EventTypeWarning, reasonInvalidConfiguration, reasonInvalidConfiguration, message, r.workload, ctx) if err != nil { diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go index 83d9f80..91b1827 100644 --- a/internal/api/kubernetes/util.go +++ b/internal/api/kubernetes/util.go @@ -8,12 +8,13 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -// getConfig gets a rest.Config for the specified kubeconfig or if empty from the in-cluster config +// getConfig gets a rest.Config for the specified kubeconfig or if empty from the in-cluster config. func getConfig(kubeconfig string) (*rest.Config, error) { if kubeconfig == "" { - return rest.InClusterConfig() + return rest.InClusterConfig() //nolint: wrapcheck // error gets wrapped in the calling function, so its fine } - return clientcmd.BuildConfigFromFlags("", kubeconfig) + + return clientcmd.BuildConfigFromFlags("", kubeconfig) //nolint: wrapcheck // error gets wrapped in the calling function, so its fine } // GetCurrentNamespaceFromFile retrieves downscaler namespace from its service account file diff --git a/internal/pkg/scalable/cronjobs.go b/internal/pkg/scalable/cronjobs.go index 8210ff2..c3415b2 100644 --- a/internal/pkg/scalable/cronjobs.go +++ b/internal/pkg/scalable/cronjobs.go @@ -1,3 +1,4 @@ +//nolint:dupl // this code is very similar for every resource, but its not really abstractable to avoid more duplication package scalable import ( @@ -8,34 +9,37 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// getCronJobs is the getResourceFunc for CronJobs +// getCronJobs is the getResourceFunc for CronJobs. func getCronJobs(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - cronjobs, err := clientsets.Kubernetes.BatchV1().CronJobs(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + cronjobs, err := clientsets.Kubernetes.BatchV1().CronJobs(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get cronjobs: %w", err) } + results := make([]Workload, 0, len(cronjobs.Items)) for i := range cronjobs.Items { results = append(results, &suspendScaledWorkload{&cronJob{&cronjobs.Items[i]}}) } + return results, nil } -// cronJob is a wrapper for batch/v1.CronJob to implement the suspendScaledResource interface +// cronJob is a wrapper for batch/v1.CronJob to implement the suspendScaledResource interface. type cronJob struct { *batch.CronJob } -// setSuspend sets the value of the suspend field on the cronJob +// setSuspend sets the value of the suspend field on the cronJob. func (c *cronJob) setSuspend(suspend bool) { c.Spec.Suspend = &suspend } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. 
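Note: CronJobs, like the Jobs further down, carry no replica count, so the downscaler treats them as suspend-scaled workloads: scaling down sets spec.suspend and scaling up clears it again. The wrapper pattern reduced to its essentials:

    package main

    import (
        "fmt"

        batch "k8s.io/api/batch/v1"
    )

    // setSuspend toggles spec.suspend; this is the whole "scaling"
    // surface a CronJob offers, which is why it is wrapped in a
    // suspendScaledWorkload rather than a replica-based one.
    func setSuspend(cj *batch.CronJob, suspend bool) {
        cj.Spec.Suspend = &suspend
    }

    func main() {
        cj := &batch.CronJob{}
        setSuspend(cj, true)
        fmt.Println("suspended:", *cj.Spec.Suspend)
    }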
func (c *cronJob) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Kubernetes.BatchV1().CronJobs(c.Namespace).Update(ctx, c.CronJob, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update cronjob: %w", err) } + return nil } diff --git a/internal/pkg/scalable/daemonsets.go b/internal/pkg/scalable/daemonsets.go index 3630991..3e2bae0 100644 --- a/internal/pkg/scalable/daemonsets.go +++ b/internal/pkg/scalable/daemonsets.go @@ -12,44 +12,49 @@ const ( labelMatchNone = "downscaler/match-none" ) -// getDaemonSets is the getResourceFunc for DaemonSets +// getDaemonSets is the getResourceFunc for DaemonSets. func getDaemonSets(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - daemonsets, err := clientsets.Kubernetes.AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + daemonsets, err := clientsets.Kubernetes.AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get daemonsets: %w", err) } + results := make([]Workload, 0, len(daemonsets.Items)) for i := range daemonsets.Items { results = append(results, &daemonSet{&daemonsets.Items[i]}) } + return results, nil } -// daemonSet is a wrapper for apps/v1.DeamonSet to implement the Workload interface +// daemonSet is a wrapper for apps/v1.DeamonSet to implement the Workload interface. type daemonSet struct { *appsv1.DaemonSet } -// ScaleUp scales the resource up +// ScaleUp scales the resource up. func (d *daemonSet) ScaleUp() error { delete(d.Spec.Template.Spec.NodeSelector, labelMatchNone) return nil } -// ScaleDown scales the resource down +// ScaleDown scales the resource down. func (d *daemonSet) ScaleDown(_ int32) error { if d.Spec.Template.Spec.NodeSelector == nil { d.Spec.Template.Spec.NodeSelector = map[string]string{} } + d.Spec.Template.Spec.NodeSelector[labelMatchNone] = "true" + return nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. 
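Note: the asymmetry between the daemonSet ScaleUp and ScaleDown above is deliberate Go semantics: delete on a nil map is a no-op, while writing to a nil map panics, so only the write path needs the nil check. In isolation:

    package main

    import "fmt"

    func main() {
        // delete is a no-op on a nil map, so ScaleUp needs no nil check...
        var selector map[string]string
        delete(selector, "downscaler/match-none")

        // ...but a write to a nil map panics, so ScaleDown initializes first.
        if selector == nil {
            selector = map[string]string{}
        }
        selector["downscaler/match-none"] = "true"

        fmt.Println(selector)
    }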
func (d *daemonSet) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Kubernetes.AppsV1().DaemonSets(d.Namespace).Update(ctx, d.DaemonSet, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update daemonset: %w", err) } + return nil } diff --git a/internal/pkg/scalable/daemonsets_test.go b/internal/pkg/scalable/daemonsets_test.go index 77d9d65..6f0cdcf 100644 --- a/internal/pkg/scalable/daemonsets_test.go +++ b/internal/pkg/scalable/daemonsets_test.go @@ -4,10 +4,13 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" ) func TestDaemonSet_ScaleUp(t *testing.T) { + t.Parallel() + tests := []struct { name string labelSet bool @@ -27,20 +30,26 @@ func TestDaemonSet_ScaleUp(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ds := daemonSet{&appsv1.DaemonSet{}} + t.Parallel() + + deamonset := daemonSet{&appsv1.DaemonSet{}} + if test.labelSet { - ds.Spec.Template.Spec.NodeSelector = map[string]string{labelMatchNone: "true"} + deamonset.Spec.Template.Spec.NodeSelector = map[string]string{labelMatchNone: "true"} } - err := ds.ScaleUp() - assert.NoError(t, err) - _, ok := ds.Spec.Template.Spec.NodeSelector[labelMatchNone] + err := deamonset.ScaleUp() + require.NoError(t, err) + + _, ok := deamonset.Spec.Template.Spec.NodeSelector[labelMatchNone] assert.Equal(t, test.wantLabelSet, ok) }) } } func TestDaemonSet_ScaleDown(t *testing.T) { + t.Parallel() + tests := []struct { name string labelSet bool @@ -60,14 +69,18 @@ func TestDaemonSet_ScaleDown(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ds := daemonSet{&appsv1.DaemonSet{}} + t.Parallel() + + deamonset := daemonSet{&appsv1.DaemonSet{}} + if test.labelSet { - ds.Spec.Template.Spec.NodeSelector = map[string]string{labelMatchNone: "true"} + deamonset.Spec.Template.Spec.NodeSelector = map[string]string{labelMatchNone: "true"} } - err := ds.ScaleDown(0) - assert.NoError(t, err) - _, ok := ds.Spec.Template.Spec.NodeSelector[labelMatchNone] + err := deamonset.ScaleDown(0) + require.NoError(t, err) + + _, ok := deamonset.Spec.Template.Spec.NodeSelector[labelMatchNone] assert.Equal(t, test.wantLabelSet, ok) }) } diff --git a/internal/pkg/scalable/deployments.go b/internal/pkg/scalable/deployments.go index 0377013..347a4ca 100644 --- a/internal/pkg/scalable/deployments.go +++ b/internal/pkg/scalable/deployments.go @@ -1,3 +1,4 @@ +//nolint:dupl // this code is very similar for every resource, but its not really abstractable to avoid more duplication package scalable import ( @@ -8,44 +9,48 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// getDeployments is the getResourceFunc for Deployments +// getDeployments is the getResourceFunc for Deployments. 
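Note: the test hunks above add t.Parallel at both the top level and inside every subtest. A sketch of the pattern; with Go 1.22 loop-variable semantics (and the copyloopvar linter enabled in the .golangci.yaml above) the subtest closure can capture the range variable directly, without the old test := test copy:

    package main

    import "testing"

    func TestPattern(t *testing.T) {
        t.Parallel() // top-level tests run alongside each other

        tests := []struct{ name string }{{name: "case"}}
        for _, test := range tests {
            t.Run(test.name, func(t *testing.T) {
                t.Parallel() // subtests fan out too; each loop iteration
                // gets its own test variable as of Go 1.22, so this
                // capture is race-free.
                t.Log(test.name)
            })
        }
    }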
func getDeployments(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - deployments, err := clientsets.Kubernetes.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + deployments, err := clientsets.Kubernetes.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get deployments: %w", err) } + results := make([]Workload, 0, len(deployments.Items)) for i := range deployments.Items { results = append(results, &replicaScaledWorkload{&deployment{&deployments.Items[i]}}) } + return results, nil } -// deployment is a wrapper for apps/v1.Deployment to implement the replicaScaledResource interface +// deployment is a wrapper for apps/v1.Deployment to implement the replicaScaledResource interface. type deployment struct { *appsv1.Deployment } -// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called +// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called. func (d *deployment) setReplicas(replicas int32) error { d.Spec.Replicas = &replicas return nil } -// getReplicas gets the current amount of replicas of the resource +// getReplicas gets the current amount of replicas of the resource. func (d *deployment) getReplicas() (int32, error) { replicas := d.Spec.Replicas if replicas == nil { return 0, errNoReplicasSpecified } + return *d.Spec.Replicas, nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. func (d *deployment) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Kubernetes.AppsV1().Deployments(d.Namespace).Update(ctx, d.Deployment, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update deployment: %w", err) } + return nil } diff --git a/internal/pkg/scalable/helpers_test.go b/internal/pkg/scalable/helpers_test.go index c78e4c5..47249f2 100644 --- a/internal/pkg/scalable/helpers_test.go +++ b/internal/pkg/scalable/helpers_test.go @@ -10,13 +10,15 @@ func boolAsPointer(value bool) *bool { return &value } -// assertBoolPointerEqual checks if two bool pointers equal in state, being nil or pointing to true or false +// assertBoolPointerEqual checks if two bool pointers equal in state, being nil or pointing to true or false. func assertBoolPointerEqual(t *testing.T, expected, actual *bool) { t.Helper() + if expected == nil { assert.Nil(t, actual) return } + if assert.NotNil(t, actual) { assert.Equal(t, *expected, *actual) } @@ -26,13 +28,15 @@ func intAsPointer(value int32) *int32 { return &value } -// assertIntPointerEqual checks if two int pointers equal in state, being nil or pointing to the same integer value +// assertIntPointerEqual checks if two int pointers equal in state, being nil or pointing to the same integer value. 
func assertIntPointerEqual(t *testing.T, expected, actual *int32) { t.Helper() + if expected == nil { assert.Nil(t, actual) return } + if assert.NotNil(t, actual) { assert.Equal(t, *expected, *actual) } diff --git a/internal/pkg/scalable/horizontalpodautoscalers.go b/internal/pkg/scalable/horizontalpodautoscalers.go index e891768..0311e62 100644 --- a/internal/pkg/scalable/horizontalpodautoscalers.go +++ b/internal/pkg/scalable/horizontalpodautoscalers.go @@ -11,47 +11,56 @@ import ( var errMinReplicasBoundsExceeded = errors.New("error: a HPAs minReplicas can only be set to int32 values larger than 1") -// getHorizontalPodAutoscalers is the getResourceFunc for horizontalPodAutoscalers +// getHorizontalPodAutoscalers is the getResourceFunc for horizontalPodAutoscalers. func getHorizontalPodAutoscalers(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - hpas, err := clientsets.Kubernetes.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + hpas, err := clientsets.Kubernetes.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get horizontalpodautoscalers: %w", err) } + results := make([]Workload, 0, len(hpas.Items)) for i := range hpas.Items { results = append(results, &replicaScaledWorkload{&horizontalPodAutoscaler{&hpas.Items[i]}}) } + return results, nil } -// horizontalPodAutoscaler is a wrapper for autoscaling/v2.HorizontalPodAutoscaler to implement the replicaScaledResource interface +// horizontalPodAutoscaler is a wrapper for autoscaling/v2.HorizontalPodAutoscaler to implement the replicaScaledResource interface. type horizontalPodAutoscaler struct { *appsv1.HorizontalPodAutoscaler } -// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called +// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called. func (h *horizontalPodAutoscaler) setReplicas(replicas int32) error { if replicas < 1 { return errMinReplicasBoundsExceeded } + h.Spec.MinReplicas = &replicas + return nil } -// getReplicas gets the current amount of replicas of the resource +// getReplicas gets the current amount of replicas of the resource. func (h *horizontalPodAutoscaler) getReplicas() (int32, error) { replicas := h.Spec.MinReplicas if replicas == nil { return 0, errNoReplicasSpecified } + return *h.Spec.MinReplicas, nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. 
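Note: Deployments above and HorizontalPodAutoscalers here expose the same two methods and are both wrapped in replicaScaledWorkload. A sketch of the surface that wrapper appears to rely on — the interface and method names follow the comments in the hunks, the wrapper's real internals are not shown in this diff:

    package scalable

    // replicaScaledResource is the small surface the wrapper needs:
    // read the current replica count and write a new one.
    type replicaScaledResource interface {
        setReplicas(replicas int32) error
        getReplicas() (int32, error)
    }

    // scaleDown sketches the wrapper's job: remember the displaced
    // replica count (the real code persists it so a later upscale can
    // restore it), then apply the downscale target.
    func scaleDown(resource replicaScaledResource, target int32) error {
        original, err := resource.getReplicas()
        if err != nil {
            return err
        }
        _ = original // persisted by the real implementation

        return resource.setReplicas(target)
    }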
func (h *horizontalPodAutoscaler) Update(clientsets *Clientsets, ctx context.Context) error { - _, err := clientsets.Kubernetes.AutoscalingV2().HorizontalPodAutoscalers(h.Namespace).Update(ctx, h.HorizontalPodAutoscaler, metav1.UpdateOptions{}) + _, err := clientsets.Kubernetes.AutoscalingV2().HorizontalPodAutoscalers(h.Namespace).Update( + ctx, h.HorizontalPodAutoscaler, + metav1.UpdateOptions{}, + ) if err != nil { return fmt.Errorf("failed to update horizontalpodautoscaler: %w", err) } + return nil } diff --git a/internal/pkg/scalable/jobs.go b/internal/pkg/scalable/jobs.go index 67cf14c..ad165c5 100644 --- a/internal/pkg/scalable/jobs.go +++ b/internal/pkg/scalable/jobs.go @@ -1,3 +1,4 @@ +//nolint:dupl // this code is very similar for every resource, but its not really abstractable to avoid more duplication package scalable import ( @@ -8,34 +9,37 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// getDeployments is the getResourceFunc for Jobs +// getDeployments is the getResourceFunc for Jobs. func getJobs(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - jobs, err := clientsets.Kubernetes.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + jobs, err := clientsets.Kubernetes.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get jobs: %w", err) } + results := make([]Workload, 0, len(jobs.Items)) for i := range jobs.Items { results = append(results, &suspendScaledWorkload{&job{&jobs.Items[i]}}) } + return results, nil } -// job is a wrapper for batch/v1.Job to implement the suspendScaledResource interface +// job is a wrapper for batch/v1.Job to implement the suspendScaledResource interface. type job struct { *batch.Job } -// setSuspend sets the value of the suspend field on the job -func (c *job) setSuspend(suspend bool) { - c.Spec.Suspend = &suspend +// setSuspend sets the value of the suspend field on the job. +func (j *job) setSuspend(suspend bool) { + j.Spec.Suspend = &suspend } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. func (j *job) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Kubernetes.BatchV1().Jobs(j.Namespace).Update(ctx, j.Job, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update job: %w", err) } + return nil } diff --git a/internal/pkg/scalable/poddisruptionbudgets.go b/internal/pkg/scalable/poddisruptionbudgets.go index 9852df6..1c1e36b 100644 --- a/internal/pkg/scalable/poddisruptionbudgets.go +++ b/internal/pkg/scalable/poddisruptionbudgets.go @@ -6,123 +6,143 @@ import ( "log/slog" "github.com/caas-team/gokubedownscaler/internal/pkg/values" - policy "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) -// getPodDisruptionBudgets is the getResourceFunc for podDisruptionBudget +// getPodDisruptionBudgets is the getResourceFunc for podDisruptionBudget. 
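Note: a PodDisruptionBudget's minAvailable and maxUnavailable are int-or-string values; the getters below report a percentage such as "50%" as undefined, because a percentage cannot be restored as a replica count later. The shape of that check:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/intstr"
    )

    const undefined = -1 // stand-in for values.Undefined

    // intValueOrUndefined mirrors getMinAvailableInt/getMaxUnavailableInt:
    // only plain integers are usable, percentages count as undefined.
    func intValueOrUndefined(v *intstr.IntOrString) int32 {
        if v == nil || v.Type == intstr.String {
            return undefined
        }
        return v.IntVal
    }

    func main() {
        percent := intstr.FromString("50%")
        count := intstr.FromInt32(3)
        fmt.Println(intValueOrUndefined(&percent), intValueOrUndefined(&count))
    }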
func getPodDisruptionBudgets(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - poddisruptionbudgets, err := clientsets.Kubernetes.PolicyV1().PodDisruptionBudgets(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + poddisruptionbudgets, err := clientsets.Kubernetes.PolicyV1().PodDisruptionBudgets(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get poddisruptionbudgets: %w", err) } + results := make([]Workload, 0, len(poddisruptionbudgets.Items)) for i := range poddisruptionbudgets.Items { results = append(results, &podDisruptionBudget{&poddisruptionbudgets.Items[i]}) } + return results, nil } -// podDisruptionBudget is a wrapper for policy/v1.PodDisruptionBudget to implement the Workload interface +// podDisruptionBudget is a wrapper for policy/v1.PodDisruptionBudget to implement the Workload interface. type podDisruptionBudget struct { *policy.PodDisruptionBudget } -// getMinAvailableInt returns the spec.MinAvailable value if it is not a percentage +// getMinAvailableInt returns the spec.MinAvailable value if it is not a percentage. func (p *podDisruptionBudget) getMinAvailableInt() int32 { minAvailable := p.Spec.MinAvailable if minAvailable == nil { return values.Undefined } + if minAvailable.Type == intstr.String { return values.Undefined } + return minAvailable.IntVal } -// setMinAvailable applies a new value to spec.MinAvailable +// setMinAvailable applies a new value to spec.MinAvailable. func (p *podDisruptionBudget) setMinAvailable(targetMinAvailable int32) { minAvailable := intstr.FromInt32(targetMinAvailable) p.Spec.MinAvailable = &minAvailable } -// getMaxUnavailableInt returns the spec.MaxUnavailable value if it is not a percentage +// getMaxUnavailableInt returns the spec.MaxUnavailable value if it is not a percentage. func (p *podDisruptionBudget) getMaxUnavailableInt() int32 { maxUnavailable := p.Spec.MaxUnavailable if maxUnavailable == nil { return values.Undefined } + if maxUnavailable.Type == intstr.String { return values.Undefined } + return maxUnavailable.IntVal } -// setMaxUnavailable applies a new value to spec.MaxUnavailable +// setMaxUnavailable applies a new value to spec.MaxUnavailable. func (p *podDisruptionBudget) setMaxUnavailable(targetMaxUnavailable int32) { maxUnavailable := intstr.FromInt32(targetMaxUnavailable) p.Spec.MaxUnavailable = &maxUnavailable } -// ScaleUp scales the resource up +// ScaleUp scales the resource up. func (p *podDisruptionBudget) ScaleUp() error { originalReplicas, err := getOriginalReplicas(p) if err != nil { return fmt.Errorf("failed to get original replicas for workload: %w", err) } + if originalReplicas == nil { slog.Debug("original replicas is not set, skipping", "workload", p.GetName(), "namespace", p.GetNamespace()) return nil } + maxUnavailable := p.getMaxUnavailableInt() - minAvailable := p.getMinAvailableInt() if maxUnavailable != values.Undefined { p.setMaxUnavailable(*originalReplicas) removeOriginalReplicas(p) + return nil } + + minAvailable := p.getMinAvailableInt() if minAvailable != values.Undefined { p.setMinAvailable(*originalReplicas) removeOriginalReplicas(p) + return nil } + slog.Debug("can't scale PodDisruptionBudgets with percent availability", "workload", p.GetName(), "namespace", p.GetNamespace()) + return nil } -// ScaleDown scales the resource down +// ScaleDown scales the resource down. 
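Note: ScaleDown below parks the displaced value via setOriginalReplicas so that ScaleUp can restore it. A sketch of such a round trip through an annotation — the key name is an illustrative assumption; the real helpers live in internal/pkg/scalable:

    package main

    import (
        "fmt"
        "strconv"
    )

    // the key is illustrative, not necessarily the project's own.
    const annotationOriginalReplicas = "downscaler/original-replicas"

    func setOriginal(annotations map[string]string, replicas int32) {
        annotations[annotationOriginalReplicas] = strconv.FormatInt(int64(replicas), 10)
    }

    func getOriginal(annotations map[string]string) (*int32, error) {
        raw, ok := annotations[annotationOriginalReplicas]
        if !ok {
            return nil, nil // workload was never downscaled
        }
        parsed, err := strconv.ParseInt(raw, 10, 32)
        if err != nil {
            return nil, fmt.Errorf("invalid original replicas annotation: %w", err)
        }
        value := int32(parsed)
        return &value, nil
    }

    func main() {
        annotations := map[string]string{}
        setOriginal(annotations, 5)
        original, _ := getOriginal(annotations)
        fmt.Println("restore to:", *original)
    }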
func (p *podDisruptionBudget) ScaleDown(downscaleReplicas int32) error { maxUnavailable := p.getMaxUnavailableInt() - minAvailable := p.getMinAvailableInt() if maxUnavailable != values.Undefined { if maxUnavailable == downscaleReplicas { slog.Debug("workload is already scaled down, skipping", "workload", p.GetName(), "namespace", p.GetNamespace()) return nil } + p.setMaxUnavailable(downscaleReplicas) setOriginalReplicas(maxUnavailable, p) + return nil } + + minAvailable := p.getMinAvailableInt() if minAvailable != values.Undefined { if minAvailable == downscaleReplicas { slog.Debug("workload is already scaled down, skipping", "workload", p.GetName(), "namespace", p.GetNamespace()) return nil } + p.setMinAvailable(downscaleReplicas) setOriginalReplicas(minAvailable, p) + return nil } + slog.Debug("can't scale PodDisruptionBudgets with percent availability", "workload", p.GetName(), "namespace", p.GetNamespace()) + return nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. func (p *podDisruptionBudget) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Kubernetes.PolicyV1().PodDisruptionBudgets(p.Namespace).Update(ctx, p.PodDisruptionBudget, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update poddisruptionbudget: %w", err) } + return nil } diff --git a/internal/pkg/scalable/poddisruptionbudgets_test.go b/internal/pkg/scalable/poddisruptionbudgets_test.go index 514ac76..5aeb23f 100644 --- a/internal/pkg/scalable/poddisruptionbudgets_test.go +++ b/internal/pkg/scalable/poddisruptionbudgets_test.go @@ -4,11 +4,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" policy "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/util/intstr" ) func TestPodDisruptionBudget_ScaleUp(t *testing.T) { + t.Parallel() + replicasUpscaled := intstr.FromInt32(5) replicasDownscaled := intstr.FromInt32(0) percentile := intstr.FromString("50%") @@ -115,20 +118,25 @@ func TestPodDisruptionBudget_ScaleUp(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + pdb := &podDisruptionBudget{&policy.PodDisruptionBudget{}} pdb.Spec.MaxUnavailable = test.maxUnavailable pdb.Spec.MinAvailable = test.minAvailable + if test.originalReplicas != nil { setOriginalReplicas(*test.originalReplicas, pdb) } err := pdb.ScaleUp() - assert.NoError(t, err) + require.NoError(t, err) + if test.wantMaxUnavailable != nil { if assert.NotNil(t, pdb.Spec.MaxUnavailable) { assert.Equal(t, *test.wantMaxUnavailable, *pdb.Spec.MaxUnavailable) } } + if test.wantMinAvailable != nil { if assert.NotNil(t, pdb.Spec.MinAvailable) { assert.Equal(t, *test.wantMinAvailable, *pdb.Spec.MinAvailable) @@ -136,13 +144,15 @@ func TestPodDisruptionBudget_ScaleUp(t *testing.T) { } oringalReplicas, err := getOriginalReplicas(pdb) - assert.NoError(t, err) // Scaling set OrignialReplicas to faulty value + require.NoError(t, err) // Scaling set OriginalReplicas to faulty value assertIntPointerEqual(t, test.wantOriginalReplicas, oringalReplicas) }) } } func TestPodDisruptionBudget_ScaleDown(t *testing.T) { + t.Parallel() + replicasUpscaled := intstr.FromInt32(5) replicasUpscaled2 := intstr.FromInt32(2) replicasDownscaled := intstr.FromInt32(0) @@ -250,20 +260,25 @@ func TestPodDisruptionBudget_ScaleDown(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t
*testing.T) { + t.Parallel() + pdb := &podDisruptionBudget{&policy.PodDisruptionBudget{}} pdb.Spec.MaxUnavailable = test.maxUnavailable pdb.Spec.MinAvailable = test.minAvailable + if test.originalReplicas != nil { setOriginalReplicas(*test.originalReplicas, pdb) } err := pdb.ScaleDown(0) - assert.NoError(t, err) + require.NoError(t, err) + if test.wantMaxUnavailable != nil { if assert.NotNil(t, pdb.Spec.MaxUnavailable) { assert.Equal(t, *test.wantMaxUnavailable, *pdb.Spec.MaxUnavailable) } } + if test.wantMinAvailable != nil { if assert.NotNil(t, pdb.Spec.MinAvailable) { assert.Equal(t, *test.wantMinAvailable, *pdb.Spec.MinAvailable) @@ -271,7 +286,7 @@ func TestPodDisruptionBudget_ScaleDown(t *testing.T) { } oringalReplicas, err := getOriginalReplicas(pdb) - assert.NoError(t, err) // Scaling set OrignialReplicas to faulty value + require.NoError(t, err) // Scaling set OriginalReplicas to faulty value assertIntPointerEqual(t, test.wantOriginalReplicas, oringalReplicas) }) } diff --git a/internal/pkg/scalable/prometheuses.go b/internal/pkg/scalable/prometheuses.go index 63430e5..61bc3d9 100644 --- a/internal/pkg/scalable/prometheuses.go +++ b/internal/pkg/scalable/prometheuses.go @@ -8,44 +8,48 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// getPrometheuses is the getResourceFunc for Prometheuses +// getPrometheuses is the getResourceFunc for Prometheuses. func getPrometheuses(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - prometheuses, err := clientsets.Monitoring.MonitoringV1().Prometheuses(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + prometheuses, err := clientsets.Monitoring.MonitoringV1().Prometheuses(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get prometheuses: %w", err) } + results := make([]Workload, 0, len(prometheuses.Items)) for i := range prometheuses.Items { results = append(results, &replicaScaledWorkload{&prometheus{prometheuses.Items[i]}}) } + return results, nil } -// prometheus is a wrapper for monitoring.coreos.com/v1.Prometheus to implement the replicaScaledResource interface +// prometheus is a wrapper for monitoring.coreos.com/v1.Prometheus to implement the replicaScaledResource interface. type prometheus struct { *monitoringv1.Prometheus } -// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called +// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called. func (p *prometheus) setReplicas(replicas int32) error { p.Spec.Replicas = &replicas return nil } -// getReplicas gets the current amount of replicas of the resource +// getReplicas gets the current amount of replicas of the resource. func (p *prometheus) getReplicas() (int32, error) { replicas := p.Spec.Replicas if replicas == nil { return 0, errNoReplicasSpecified } + return *p.Spec.Replicas, nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource.
func (p *prometheus) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Monitoring.MonitoringV1().Prometheuses(p.Namespace).Update(ctx, p.Prometheus, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update prometheus: %w", err) } + return nil } diff --git a/internal/pkg/scalable/replicaScaledWorkloads.go b/internal/pkg/scalable/replicaScaledWorkloads.go index 54e596e..88b3317 100644 --- a/internal/pkg/scalable/replicaScaledWorkloads.go +++ b/internal/pkg/scalable/replicaScaledWorkloads.go @@ -6,7 +6,7 @@ import ( "log/slog" ) -// replicaScaledResource provides all the functions needed to scale a resource which is scaled by setting the replica count +// replicaScaledResource provides all the functions needed to scale a resource which is scaled by setting the replica count. type replicaScaledResource interface { scalableResource // Update updates the resource with all changes made to it. It should only be called once on a resource @@ -17,17 +17,18 @@ type replicaScaledResource interface { getReplicas() (int32, error) } -// replicaScaledWorkload is a wrapper for all resources which are scaled by setting the replica count +// replicaScaledWorkload is a wrapper for all resources which are scaled by setting the replica count. type replicaScaledWorkload struct { replicaScaledResource } -// ScaleUp scales up the underlying replicaScaledResource +// ScaleUp scales up the underlying replicaScaledResource. func (r *replicaScaledWorkload) ScaleUp() error { originalReplicas, err := getOriginalReplicas(r) if err != nil { return fmt.Errorf("failed to get original replicas for workload: %w", err) } + if originalReplicas == nil { slog.Debug("original replicas is not set, skipping", "workload", r.GetName(), "namespace", r.GetNamespace()) return nil @@ -37,16 +38,19 @@ func (r *replicaScaledWorkload) ScaleUp() error { if err != nil { return fmt.Errorf("failed to set original replicas for workload: %w", err) } + removeOriginalReplicas(r) + return nil } -// ScaleDown scales down the underlying replicaScaledResource +// ScaleDown scales down the underlying replicaScaledResource. 
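The ScaleDown/ScaleUp pair below implements a simple round trip: stash the current replica count, scale to the target, and restore from the stash on the way back up. A rough in-memory sketch of that pattern (simplified stand-in types, no annotations or API calls):

package main

import "fmt"

// fakeWorkload is a simplified stand-in for a replica-scaled resource,
// keeping the original replica count in memory instead of an annotation.
type fakeWorkload struct {
	replicas int32
	original *int32 // mirrors the downscaler/original-replicas annotation
}

func (w *fakeWorkload) scaleDown(target int32) {
	if w.replicas == target {
		return // already scaled down, skip
	}
	orig := w.replicas
	w.original = &orig // remember where to scale back up to
	w.replicas = target
}

func (w *fakeWorkload) scaleUp() {
	if w.original == nil {
		return // original replicas not set, skip
	}
	w.replicas = *w.original
	w.original = nil // the annotation is removed after upscaling
}

func main() {
	w := &fakeWorkload{replicas: 5}
	w.scaleDown(0)
	fmt.Println(w.replicas) // 0
	w.scaleUp()
	fmt.Println(w.replicas) // 5
}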
func (r *replicaScaledWorkload) ScaleDown(downscaleReplicas int32) error { originalReplicas, err := r.getReplicas() if err != nil { return fmt.Errorf("failed to get original replicas for workload: %w", err) } + if originalReplicas == downscaleReplicas { slog.Debug("workload is already scaled down, skipping", "workload", r.GetName(), "namespace", r.GetNamespace()) return nil } @@ -56,6 +60,8 @@ func (r *replicaScaledWorkload) ScaleDown(downscaleReplicas int32) error { if err != nil { return fmt.Errorf("failed to set replicas for workload: %w", err) } + setOriginalReplicas(originalReplicas, r) + return nil } diff --git a/internal/pkg/scalable/replicaScaledWorkloads_test.go b/internal/pkg/scalable/replicaScaledWorkloads_test.go index 45f64d0..232d2e5 100644 --- a/internal/pkg/scalable/replicaScaledWorkloads_test.go +++ b/internal/pkg/scalable/replicaScaledWorkloads_test.go @@ -4,10 +4,13 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" ) func TestReplicaScaledWorkload_ScaleUp(t *testing.T) { + t.Parallel() + tests := []struct { name string replicas int32 @@ -40,26 +43,33 @@ func TestReplicaScaledWorkload_ScaleUp(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - r := &replicaScaledWorkload{&deployment{&appsv1.Deployment{}}} - _ = r.setReplicas(test.replicas) + t.Parallel() + + deployment := &replicaScaledWorkload{&deployment{&appsv1.Deployment{}}} + _ = deployment.setReplicas(test.replicas) + if test.originalReplicas != nil { - setOriginalReplicas(*test.originalReplicas, r) + setOriginalReplicas(*test.originalReplicas, deployment) } - err := r.ScaleUp() - assert.NoError(t, err) - replicas, err := r.getReplicas() + err := deployment.ScaleUp() + require.NoError(t, err) + replicas, err := deployment.getReplicas() + if assert.NoError(t, err) { assert.Equal(t, test.wantReplicas, replicas) } - oringalReplicas, err := getOriginalReplicas(r) - assert.NoError(t, err) // Scaling set OrignialReplicas to faulty value + + oringalReplicas, err := getOriginalReplicas(deployment) + require.NoError(t, err) // Scaling set OriginalReplicas to faulty value assertIntPointerEqual(t, test.wantOriginalReplicas, oringalReplicas) }) } } func TestReplicaScaledWorkload_ScaleDown(t *testing.T) { + t.Parallel() + tests := []struct { name string replicas int32 @@ -92,20 +102,25 @@ func TestReplicaScaledWorkload_ScaleDown(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - r := &replicaScaledWorkload{&deployment{&appsv1.Deployment{}}} - _ = r.setReplicas(test.replicas) + t.Parallel() + + deployment := &replicaScaledWorkload{&deployment{&appsv1.Deployment{}}} + _ = deployment.setReplicas(test.replicas) + if test.originalReplicas != nil { - setOriginalReplicas(*test.originalReplicas, r) + setOriginalReplicas(*test.originalReplicas, deployment) } - err := r.ScaleDown(0) - assert.NoError(t, err) - replicas, err := r.getReplicas() + err := deployment.ScaleDown(0) + require.NoError(t, err) + + replicas, err := deployment.getReplicas() if assert.NoError(t, err) { assert.Equal(t, test.wantReplicas, replicas) } - oringalReplicas, err := getOriginalReplicas(r) - assert.NoError(t, err) // Scaling set OrignialReplicas to faulty value + + oringalReplicas, err := getOriginalReplicas(deployment) + require.NoError(t, err) // Scaling set OriginalReplicas to faulty value assertIntPointerEqual(t, test.wantOriginalReplicas, oringalReplicas) }) } diff --git a/internal/pkg/scalable/rollouts.go
b/internal/pkg/scalable/rollouts.go index 4cb797d..79d398a 100644 --- a/internal/pkg/scalable/rollouts.go +++ b/internal/pkg/scalable/rollouts.go @@ -1,3 +1,4 @@ +//nolint:dupl // this code is very similar for every resource, but it's not really abstractable to avoid more duplication package scalable import ( @@ -8,44 +9,48 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// getRollouts is the getResourceFunc for Argo Rollouts +// getRollouts is the getResourceFunc for Argo Rollouts. func getRollouts(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - rollouts, err := clientsets.Argo.ArgoprojV1alpha1().Rollouts(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + rollouts, err := clientsets.Argo.ArgoprojV1alpha1().Rollouts(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get rollouts: %w", err) } + results := make([]Workload, 0, len(rollouts.Items)) for i := range rollouts.Items { results = append(results, &replicaScaledWorkload{&rollout{&rollouts.Items[i]}}) } + return results, nil } -// rollout is a wrapper for argoproj.io/v1alpha1.Rollout to implement the replicaScaledResource interface +// rollout is a wrapper for argoproj.io/v1alpha1.Rollout to implement the replicaScaledResource interface. type rollout struct { *argov1alpha1.Rollout } -// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called +// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called. func (r *rollout) setReplicas(replicas int32) error { r.Spec.Replicas = &replicas return nil } -// getReplicas gets the current amount of replicas of the resource +// getReplicas gets the current amount of replicas of the resource. func (r *rollout) getReplicas() (int32, error) { replicas := r.Spec.Replicas if replicas == nil { return 0, errNoReplicasSpecified } + return *r.Spec.Replicas, nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. func (r *rollout) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Argo.ArgoprojV1alpha1().Rollouts(r.Namespace).Update(ctx, r.Rollout, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update rollout: %w", err) } + return nil } diff --git a/internal/pkg/scalable/scaledobjects.go b/internal/pkg/scalable/scaledobjects.go index 7b44151..846fb5f 100644 --- a/internal/pkg/scalable/scaledobjects.go +++ b/internal/pkg/scalable/scaledobjects.go @@ -6,7 +6,6 @@ import ( "strconv" "github.com/caas-team/gokubedownscaler/internal/pkg/values" - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -15,52 +14,59 @@ const ( annotationKedaPausedReplicas = "autoscaling.keda.sh/paused-replicas" ) -// getScaledObjects is the getResourceFunc for Keda ScaledObjects +// getScaledObjects is the getResourceFunc for Keda ScaledObjects.
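For context, KEDA stops autoscaling a ScaledObject while the autoscaling.keda.sh/paused-replicas annotation is present, which is what setReplicas/getReplicas below lean on. A simplified sketch of that annotation handling (not the patch's actual code; -1 stands in for values.Undefined):

package main

import (
	"fmt"
	"strconv"
)

const pausedAnnotation = "autoscaling.keda.sh/paused-replicas"

// setPausedReplicas mirrors the idea behind scaledObject.setReplicas: KEDA
// pauses autoscaling at the given replica count while the annotation exists.
func setPausedReplicas(annotations map[string]string, replicas int32) {
	annotations[pausedAnnotation] = strconv.Itoa(int(replicas))
}

// pausedReplicas mirrors scaledObject.getReplicas, returning -1 when unset.
func pausedReplicas(annotations map[string]string) (int32, error) {
	raw, ok := annotations[pausedAnnotation]
	if !ok {
		return -1, nil
	}
	v, err := strconv.ParseInt(raw, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid value %q: %w", raw, err)
	}
	return int32(v), nil
}

func main() {
	annotations := map[string]string{}
	setPausedReplicas(annotations, 0)
	v, _ := pausedReplicas(annotations)
	fmt.Println(v) // 0: autoscaling is paused at zero replicas
}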
func getScaledObjects(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - scaledobjects, err := clientsets.Keda.KedaV1alpha1().ScaledObjects(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + scaledobjects, err := clientsets.Keda.KedaV1alpha1().ScaledObjects(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get scaledobjects: %w", err) } + results := make([]Workload, 0, len(scaledobjects.Items)) for i := range scaledobjects.Items { results = append(results, &replicaScaledWorkload{&scaledObject{&scaledobjects.Items[i]}}) } + return results, nil } -// scaledObject is a wrapper for keda.sh/v1alpha1.ScaledObject to implement the replicaScaledResource interface +// scaledObject is a wrapper for keda.sh/v1alpha1.ScaledObject to implement the replicaScaledResource interface. type scaledObject struct { *kedav1alpha1.ScaledObject } -// setReplicas sets the pausedReplicas annotation to the specified replicas. Changes won't be made on Kubernetes until update() is called +// setReplicas sets the pausedReplicas annotation to the specified replicas. Changes won't be made on Kubernetes until update() is called. func (s *scaledObject) setReplicas(replicas int32) error { if replicas == values.Undefined { // pausedAnnotation was not defined before workload was downscaled delete(s.Annotations, annotationKedaPausedReplicas) return nil } + if s.Annotations == nil { s.Annotations = map[string]string{} } + s.Annotations[annotationKedaPausedReplicas] = strconv.Itoa(int(replicas)) + return nil } -// getReplicas gets the current value of the pausedReplicas annotation +// getReplicas gets the current value of the pausedReplicas annotation. func (s *scaledObject) getReplicas() (int32, error) { pausedReplicasAnnotation, ok := s.Annotations[annotationKedaPausedReplicas] if !ok { return values.Undefined, nil } + pausedReplicas, err := strconv.ParseInt(pausedReplicasAnnotation, 10, 32) if err != nil { return 0, fmt.Errorf("invalid value for annotation %q: %w", annotationKedaPausedReplicas, err) } + // #nosec G115 return int32(pausedReplicas), nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. func (s *scaledObject) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Keda.KedaV1alpha1().ScaledObjects(s.Namespace).Update(ctx, s.ScaledObject, metav1.UpdateOptions{}) if err != nil { diff --git a/internal/pkg/scalable/stacks.go b/internal/pkg/scalable/stacks.go index 27b7919..1f0fae3 100644 --- a/internal/pkg/scalable/stacks.go +++ b/internal/pkg/scalable/stacks.go @@ -1,3 +1,4 @@ +//nolint:dupl // this code is very similar for every resource, but it's not really abstractable to avoid more duplication package scalable import ( @@ -8,44 +9,48 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// getStacks is the getResourceFunc for Zalando Stacks +// getStacks is the getResourceFunc for Zalando Stacks.
func getStacks(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - stacks, err := clientsets.Zalando.ZalandoV1().Stacks(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + stacks, err := clientsets.Zalando.ZalandoV1().Stacks(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get stacks: %w", err) } + results := make([]Workload, 0, len(stacks.Items)) for i := range stacks.Items { results = append(results, &replicaScaledWorkload{&stack{&stacks.Items[i]}}) } + return results, nil } -// stack is a wrapper for zalando.org/v1.Stack to implement the replicaScaledResource interface +// stack is a wrapper for zalando.org/v1.Stack to implement the replicaScaledResource interface. type stack struct { *zalandov1.Stack } -// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called +// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called. func (s *stack) setReplicas(replicas int32) error { s.Spec.Replicas = &replicas return nil } -// getReplicas gets the current amount of replicas of the resource +// getReplicas gets the current amount of replicas of the resource. func (s *stack) getReplicas() (int32, error) { replicas := s.Spec.Replicas if replicas == nil { return 0, errNoReplicasSpecified } + return *s.Spec.Replicas, nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. func (s *stack) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Zalando.ZalandoV1().Stacks(s.Namespace).Update(ctx, s.Stack, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update stack: %w", err) } + return nil } diff --git a/internal/pkg/scalable/statefulsets.go b/internal/pkg/scalable/statefulsets.go index b1e859d..84113c5 100644 --- a/internal/pkg/scalable/statefulsets.go +++ b/internal/pkg/scalable/statefulsets.go @@ -1,3 +1,4 @@ +//nolint:dupl // this code is very similar for every resource, but it's not really abstractable to avoid more duplication package scalable import ( @@ -8,44 +9,48 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// getStatefulSets is the getResourceFunc for StatefulSets +// getStatefulSets is the getResourceFunc for StatefulSets. func getStatefulSets(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { - statefulsets, err := clientsets.Kubernetes.AppsV1().StatefulSets(namespace).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout}) + statefulsets, err := clientsets.Kubernetes.AppsV1().StatefulSets(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to get statefulsets: %w", err) } + results := make([]Workload, 0, len(statefulsets.Items)) for i := range statefulsets.Items { results = append(results, &replicaScaledWorkload{&statefulSet{&statefulsets.Items[i]}}) } + return results, nil } -// statefulset is a wrapper for apps/v1.StatefulSet to implement the replicaScaledResource interface +// statefulSet is a wrapper for apps/v1.StatefulSet to implement the replicaScaledResource interface. type statefulSet struct { *appsv1.StatefulSet } -// setReplicas sets the amount of replicas on the resource.
Changes won't be made on Kubernetes until update() is called +// setReplicas sets the amount of replicas on the resource. Changes won't be made on Kubernetes until update() is called. func (s *statefulSet) setReplicas(replicas int32) error { s.Spec.Replicas = &replicas return nil } -// getReplicas gets the current amount of replicas of the resource +// getReplicas gets the current amount of replicas of the resource. func (s *statefulSet) getReplicas() (int32, error) { replicas := s.Spec.Replicas if replicas == nil { return 0, errNoReplicasSpecified } + return *s.Spec.Replicas, nil } -// Update updates the resource with all changes made to it. It should only be called once on a resource +// Update updates the resource with all changes made to it. It should only be called once on a resource. func (s *statefulSet) Update(clientsets *Clientsets, ctx context.Context) error { _, err := clientsets.Kubernetes.AppsV1().StatefulSets(s.Namespace).Update(ctx, s.StatefulSet, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to update statefulset: %w", err) } + return nil } diff --git a/internal/pkg/scalable/suspendScaledWorkloads.go b/internal/pkg/scalable/suspendScaledWorkloads.go index 0452327..a47f1cf 100644 --- a/internal/pkg/scalable/suspendScaledWorkloads.go +++ b/internal/pkg/scalable/suspendScaledWorkloads.go @@ -4,7 +4,7 @@ import ( "context" ) -// suspendScaledResource provides all the functions needed to scale a resource which is scaled by setting a suspend field +// suspendScaledResource provides all the functions needed to scale a resource which is scaled by setting a suspend field. type suspendScaledResource interface { scalableResource // Update updates the resource with all changes made to it. It should only be called once on a resource @@ -13,18 +13,18 @@ type suspendScaledResource interface { setSuspend(suspend bool) } -// suspendScaledWorkload is a wrapper for all resources which are scaled by setting a suspend field +// suspendScaledWorkload is a wrapper for all resources which are scaled by setting a suspend field. type suspendScaledWorkload struct { suspendScaledResource } -// ScaleUp scales up the underlying suspendScaledResource +// ScaleUp scales up the underlying suspendScaledResource. func (r *suspendScaledWorkload) ScaleUp() error { r.setSuspend(false) return nil } -// ScaleDown scales down the underlying suspendScaledResource +// ScaleDown scales down the underlying suspendScaledResource. 
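A minimal sketch of the suspend-scaling idea below, with a toy type in place of the real CronJob/Job wrappers; since scaling is just a boolean toggle, the replica argument to ScaleDown is ignored:

package main

import "fmt"

// suspendable is a reduced version of the suspendScaledResource interface:
// anything scaled by toggling a suspend flag rather than a replica count.
type suspendable interface {
	setSuspend(suspend bool)
}

type fakeCronJob struct{ suspend *bool }

func (c *fakeCronJob) setSuspend(s bool) { c.suspend = &s }

// scaleDown suspends the resource; scaleUp resumes it. The replica count
// is irrelevant for suspend-scaled workloads, matching ScaleDown(_ int32).
func scaleDown(s suspendable) { s.setSuspend(true) }
func scaleUp(s suspendable)   { s.setSuspend(false) }

func main() {
	cj := &fakeCronJob{}
	scaleDown(cj)
	fmt.Println(*cj.suspend) // true: no new jobs will be scheduled
	scaleUp(cj)
	fmt.Println(*cj.suspend) // false
}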
func (r *suspendScaledWorkload) ScaleDown(_ int32) error { r.setSuspend(true) return nil diff --git a/internal/pkg/scalable/suspendScaledWorkloads_test.go b/internal/pkg/scalable/suspendScaledWorkloads_test.go index 89361c4..7b90e8a 100644 --- a/internal/pkg/scalable/suspendScaledWorkloads_test.go +++ b/internal/pkg/scalable/suspendScaledWorkloads_test.go @@ -3,11 +3,13 @@ package scalable import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" batch "k8s.io/api/batch/v1" ) func TestSuspendScaledWorkload_ScaleUp(t *testing.T) { + t.Parallel() + tests := []struct { name string suspend *bool @@ -32,18 +34,22 @@ func TestSuspendScaledWorkload_ScaleUp(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cj := cronJob{&batch.CronJob{}} - cj.Spec.Suspend = test.suspend - s := suspendScaledWorkload{&cj} + t.Parallel() + + cronjob := cronJob{&batch.CronJob{}} + cronjob.Spec.Suspend = test.suspend + s := suspendScaledWorkload{&cronjob} err := s.ScaleUp() - assert.NoError(t, err) - assertBoolPointerEqual(t, test.wantSuspend, cj.Spec.Suspend) + require.NoError(t, err) + assertBoolPointerEqual(t, test.wantSuspend, cronjob.Spec.Suspend) }) } } func TestSuspendScaledWorkload_ScaleDown(t *testing.T) { + t.Parallel() + tests := []struct { name string suspend *bool @@ -68,13 +74,15 @@ func TestSuspendScaledWorkload_ScaleDown(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cj := cronJob{&batch.CronJob{}} - cj.Spec.Suspend = test.suspend - s := suspendScaledWorkload{&cj} + t.Parallel() + + cronjob := cronJob{&batch.CronJob{}} + cronjob.Spec.Suspend = test.suspend + s := suspendScaledWorkload{&cronjob} err := s.ScaleDown(0) - assert.NoError(t, err) - assertBoolPointerEqual(t, test.wantSuspend, cj.Spec.Suspend) + require.NoError(t, err) + assertBoolPointerEqual(t, test.wantSuspend, cronjob.Spec.Suspend) }) } } diff --git a/internal/pkg/scalable/util.go b/internal/pkg/scalable/util.go index f81eedb..593b83f 100644 --- a/internal/pkg/scalable/util.go +++ b/internal/pkg/scalable/util.go @@ -5,91 +5,121 @@ import ( "log/slog" "strconv" - "github.com/caas-team/gokubedownscaler/internal/pkg/values" + "github.com/caas-team/gokubedownscaler/internal/pkg/util" ) const ( annotationOriginalReplicas = "downscaler/original-replicas" ) -// FilterExcluded filters the workloads to match the includeLabels, excludedNamespaces and excludedWorkloads -func FilterExcluded(workloads []Workload, includeLabels values.RegexList, excludedNamespaces values.RegexList, excludedWorkloads values.RegexList) []Workload { - var results []Workload +// FilterExcluded filters the workloads to match the includeLabels, excludedNamespaces and excludedWorkloads. 
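A standalone sketch of the regex-based exclusion used by FilterExcluded below; matchesAny uses MatchString for brevity, so the real CheckMatchesAny may anchor its matches differently:

package main

import (
	"fmt"
	"regexp"
)

// regexList is a reduced version of util.RegexList from this patch.
type regexList []*regexp.Regexp

func (r regexList) matchesAny(text string) bool {
	for _, re := range r {
		if re.MatchString(text) {
			return true
		}
	}
	return false
}

func main() {
	excludedNamespaces := regexList{regexp.MustCompile("^kube-.*")}
	for _, ns := range []string{"kube-system", "default"} {
		if excludedNamespaces.matchesAny(ns) {
			fmt.Println(ns, "=> excluded from scanning")
			continue
		}
		fmt.Println(ns, "=> kept")
	}
}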
+func FilterExcluded(workloads []Workload, includeLabels, excludedNamespaces, excludedWorkloads util.RegexList) []Workload { + results := make([]Workload, 0, len(workloads)) + for _, workload := range workloads { if !isMatchingLabels(workload, includeLabels) { - slog.Debug("workload is not matching any of the specified labels, excluding it from being scanned", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + slog.Debug( + "workload is not matching any of the specified labels, excluding it from being scanned", + "workload", workload.GetName(), + "namespace", workload.GetNamespace(), + ) + continue } + if isNamespaceExcluded(workload, excludedNamespaces) { - slog.Debug("the workloads namespace is excluded, excluding it from being scanned", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + slog.Debug( + "the workload's namespace is excluded, excluding it from being scanned", + "workload", workload.GetName(), + "namespace", workload.GetNamespace(), + ) + continue } + if isWorkloadExcluded(workload, excludedWorkloads) { - slog.Debug("the workloads name is excluded, excluding it from being scanned", "workload", workload.GetName(), "namespace", workload.GetNamespace()) + slog.Debug( + "the workload's name is excluded, excluding it from being scanned", + "workload", workload.GetName(), + "namespace", workload.GetNamespace(), + ) + continue } + results = append(results, workload) } - return results + + return results[:len(results):len(results)] // limit the slice's capacity to its length } -// isMatchingLabels check if the workload is matching any of the specified labels -func isMatchingLabels(workload Workload, includeLabels values.RegexList) bool { +// isMatchingLabels checks if the workload matches any of the specified labels. +func isMatchingLabels(workload Workload, includeLabels util.RegexList) bool { if includeLabels == nil { return true } + for label, value := range workload.GetLabels() { if !includeLabels.CheckMatchesAny(fmt.Sprintf("%s=%s", label, value)) { continue } + return true } + return false } -// isNamespaceExcluded checks if the workloads namespace is excluded -func isNamespaceExcluded(workload Workload, excludedNamespaces values.RegexList) bool { +// isNamespaceExcluded checks if the workload's namespace is excluded. +func isNamespaceExcluded(workload Workload, excludedNamespaces util.RegexList) bool { if excludedNamespaces == nil { return false } + return excludedNamespaces.CheckMatchesAny(workload.GetNamespace()) } -// isWorkloadExcluded checks if the workloads name is excluded -func isWorkloadExcluded(workload Workload, excludedWorkloads values.RegexList) bool { +// isWorkloadExcluded checks if the workload's name is excluded. +func isWorkloadExcluded(workload Workload, excludedWorkloads util.RegexList) bool { if excludedWorkloads == nil { return false } + return excludedWorkloads.CheckMatchesAny(workload.GetName()) } -// setOriginalReplicas sets the original replicas annotation on the workload +// setOriginalReplicas sets the original replicas annotation on the workload. func setOriginalReplicas(originalReplicas int32, workload Workload) { annotations := workload.GetAnnotations() if annotations == nil { annotations = map[string]string{} } + annotations[annotationOriginalReplicas] = strconv.Itoa(int(originalReplicas)) workload.SetAnnotations(annotations) } -// getOriginalReplicas gets the original replicas annotation on the workload. nil is undefined +// getOriginalReplicas gets the original replicas annotation on the workload. nil is undefined.
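The annotation round trip behind these helpers, condensed into a runnable sketch (downscaler/original-replicas is the real annotation key; the rest is simplified):

package main

import (
	"fmt"
	"strconv"
)

const annotationOriginalReplicas = "downscaler/original-replicas"

func main() {
	annotations := map[string]string{}

	// setOriginalReplicas: remember the pre-downscale replica count.
	annotations[annotationOriginalReplicas] = strconv.Itoa(5)

	// getOriginalReplicas: a missing annotation means "undefined" (nil).
	if raw, ok := annotations[annotationOriginalReplicas]; ok {
		v, err := strconv.ParseInt(raw, 10, 32)
		if err != nil {
			panic(fmt.Errorf("failed to parse original replicas annotation: %w", err))
		}
		fmt.Println(int32(v)) // 5
	}

	// removeOriginalReplicas: drop the annotation once the workload is back up.
	delete(annotations, annotationOriginalReplicas)
}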
func getOriginalReplicas(workload Workload) (*int32, error) { annotations := workload.GetAnnotations() + originalReplicasString, ok := annotations[annotationOriginalReplicas] if !ok { - return nil, nil + return nil, nil //nolint: nilnil // should get fixed along with https://github.com/caas-team/GoKubeDownscaler/issues/7 } + originalReplicas, err := strconv.ParseInt(originalReplicasString, 10, 32) if err != nil { return nil, fmt.Errorf("failed to parse original replicas annotation on workload: %w", err) } + // #nosec G115 result := int32(originalReplicas) + return &result, nil } -// removeOriginalReplicas removes the annotationOriginalReplicas from the workload +// removeOriginalReplicas removes the annotationOriginalReplicas from the workload. func removeOriginalReplicas(workload Workload) { annotations := workload.GetAnnotations() delete(annotations, annotationOriginalReplicas) diff --git a/internal/pkg/scalable/util_test.go b/internal/pkg/scalable/util_test.go index c523e94..0910a04 100644 --- a/internal/pkg/scalable/util_test.go +++ b/internal/pkg/scalable/util_test.go @@ -4,13 +4,15 @@ import ( "regexp" "testing" - "github.com/caas-team/gokubedownscaler/internal/pkg/values" + "github.com/caas-team/gokubedownscaler/internal/pkg/util" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestFilterExcluded(t *testing.T) { + t.Parallel() + // define some example objects to use type ns struct { deployment1 Workload @@ -51,9 +53,9 @@ func TestFilterExcluded(t *testing.T) { tests := []struct { name string workloads []Workload - includeLabels values.RegexList - excludedNamespaces values.RegexList - excludedWorkloads values.RegexList + includeLabels util.RegexList + excludedNamespaces util.RegexList + excludedWorkloads util.RegexList want []Workload }{ { @@ -67,7 +69,7 @@ func TestFilterExcluded(t *testing.T) { { name: "includeLabels", workloads: []Workload{ns1.deployment1, ns1.deployment2, ns1.labeledDeployment}, - includeLabels: values.RegexList{regexp.MustCompile(".*")}, // match any label + includeLabels: util.RegexList{regexp.MustCompile(".*")}, // match any label excludedNamespaces: nil, excludedWorkloads: nil, want: []Workload{ns1.labeledDeployment}, @@ -76,7 +78,7 @@ func TestFilterExcluded(t *testing.T) { name: "excludeNamespaces", workloads: []Workload{ns1.deployment1, ns1.deployment2, ns2.deployment1}, includeLabels: nil, - excludedNamespaces: values.RegexList{regexp.MustCompile("Namespace1")}, // exclude Namespace1 + excludedNamespaces: util.RegexList{regexp.MustCompile("Namespace1")}, // exclude Namespace1 excludedWorkloads: nil, want: []Workload{ns2.deployment1}, }, @@ -85,12 +87,15 @@ func TestFilterExcluded(t *testing.T) { workloads: []Workload{ns1.deployment1, ns1.deployment2, ns2.deployment1}, includeLabels: nil, excludedNamespaces: nil, - excludedWorkloads: values.RegexList{regexp.MustCompile("Deployment1")}, // exclude Deployment1 + excludedWorkloads: util.RegexList{regexp.MustCompile("Deployment1")}, // exclude Deployment1 want: []Workload{ns1.deployment2}, }, } + for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + got := FilterExcluded(test.workloads, test.includeLabels, test.excludedNamespaces, test.excludedWorkloads) assert.Equal(t, test.want, got) }) diff --git a/internal/pkg/scalable/workload.go b/internal/pkg/scalable/workload.go index 978064e..d02a9e7 100644 --- a/internal/pkg/scalable/workload.go +++ b/internal/pkg/scalable/workload.go @@ -3,6 +3,7 @@ package scalable 
import ( "context" "errors" + "fmt" argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" keda "github.com/kedacore/keda/v2/pkg/generated/clientset/versioned" @@ -15,29 +16,43 @@ import ( ) var ( - timeout int64 = 30 - errNoReplicasSpecified = errors.New("error: workload has no replicas set") + errResourceNotSupported = errors.New("error: specified resource type is not supported") + errNoReplicasSpecified = errors.New("error: workload has no replicas set") ) -// getResourceFunc is a function that gets a specific resource as a Workload +// getResourceFunc is a function that gets a specific resource as a Workload. type getResourceFunc func(namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) -// GetWorkloads maps the resource name to an implementation specific getResourceFunc -var GetWorkloads = map[string]getResourceFunc{ - "deployments": getDeployments, - "statefulsets": getStatefulSets, - "cronjobs": getCronJobs, - "jobs": getJobs, - "daemonsets": getDaemonSets, - "poddisruptionbudgets": getPodDisruptionBudgets, - "horizontalpodautoscalers": getHorizontalPodAutoscalers, - "scaledobjects": getScaledObjects, - "rollouts": getRollouts, - "stacks": getStacks, - "prometheuses": getPrometheuses, +// GetWorkloads gets all workloads of the given resource in the cluster. +func GetWorkloads(resource, namespace string, clientsets *Clientsets, ctx context.Context) ([]Workload, error) { + resourceFuncMap := map[string]getResourceFunc{ + "deployments": getDeployments, + "statefulsets": getStatefulSets, + "cronjobs": getCronJobs, + "jobs": getJobs, + "daemonsets": getDaemonSets, + "poddisruptionbudgets": getPodDisruptionBudgets, + "horizontalpodautoscalers": getHorizontalPodAutoscalers, + "scaledobjects": getScaledObjects, + "rollouts": getRollouts, + "stacks": getStacks, + "prometheuses": getPrometheuses, + } + + resourceFunc, exists := resourceFuncMap[resource] + if !exists { + return nil, errResourceNotSupported + } + + workloads, err := resourceFunc(namespace, clientsets, ctx) + if err != nil { + return nil, fmt.Errorf("failed to get workloads of type %q: %w", resource, err) + } + + return workloads, nil } -// scalableResource provides all functions needed to scale any type of resource +// scalableResource provides all functions needed to scale any type of resource. type scalableResource interface { // GetAnnotations gets the annotations of the resource GetAnnotations() map[string]string @@ -57,7 +72,7 @@ type scalableResource interface { SetAnnotations(annotations map[string]string) } -// Workload provides all functions needed to scale the workload +// Workload provides all functions needed to scale the workload. type Workload interface { scalableResource // Update updates the resource with all changes made to it. It should only be called once on a resource diff --git a/internal/pkg/util/config.go b/internal/pkg/util/config.go new file mode 100644 index 0000000..ab69bd6 --- /dev/null +++ b/internal/pkg/util/config.go @@ -0,0 +1,112 @@ +package util + +import ( + "flag" + "fmt" + "time" +) + +// RuntimeConfiguration represents the runtime configuration for the downscaler. +type RuntimeConfiguration struct { + // DryRun sets if the downscaler should take actions or just print them out. + DryRun bool + // Debug sets if debug information should be printed. + Debug bool + // Once sets if the scan should only run once. + Once bool + // Interval sets how long to wait between scans.
+ Interval time.Duration + // IncludeNamespaces sets the list of namespaces to restrict the downscaler to. + IncludeNamespaces []string + // IncludeResources sets the list of resources to restrict the downscaler to. + IncludeResources []string + // ExcludeNamespaces sets the list of namespaces to ignore while downscaling. + ExcludeNamespaces RegexList + // ExcludeWorkloads sets the list of workload names to ignore while downscaling. + ExcludeWorkloads RegexList + // IncludeLabels sets the list of labels workloads have to match one of to be scaled. + IncludeLabels RegexList + // TimeAnnotation sets the annotation used for grace-period instead of creation time. + TimeAnnotation string + // Kubeconfig sets an optional kubeconfig to use for testing purposes instead of the in-cluster config. + Kubeconfig string +} + +// ParseConfigFlags sets all cli flags required for the runtime configuration. +func (c *RuntimeConfiguration) ParseConfigFlags() { + flag.BoolVar( + &c.DryRun, + "dry-run", + false, + "print actions instead of doing them. enables debug logs (default: false)", + ) + flag.BoolVar( + &c.Debug, + "debug", + false, + "print more debug information (default: false)", + ) + flag.BoolVar( + &c.Once, + "once", + false, + "run scan only once (default: false)", + ) + flag.Var( + (*DurationValue)(&c.Interval), + "interval", + "time between scans (default: 30s)", + ) + flag.Var( + (*StringListValue)(&c.IncludeNamespaces), + "namespace", + "restrict the downscaler to the specified namespaces (default: all)", + ) + flag.Var( + (*StringListValue)(&c.IncludeResources), + "include-resources", + "restricts the downscaler to the specified resource types (default: deployments)", + ) + flag.Var( + &c.ExcludeNamespaces, + "exclude-namespaces", + "exclude namespaces from being scaled (default: kube-system,kube-downscaler)", + ) + flag.Var( + &c.ExcludeWorkloads, + "exclude-deployments", + "exclude deployments from being scaled (optional)", + ) + flag.Var( + &c.IncludeLabels, + "matching-labels", + "restricts the downscaler to workloads with these labels (default: all)", + ) + flag.StringVar( + &c.Kubeconfig, + "k", + "", + "kubeconfig to use instead of the in-cluster config (optional)", + ) + flag.StringVar( + &c.TimeAnnotation, + "deployment-time-annotation", + "", + "the annotation to use instead of creation time for grace period (optional)", + ) +} + +// ParseConfigEnvVars parses all environment variables for the runtime configuration. +func (c *RuntimeConfiguration) ParseConfigEnvVars() error { + err := GetEnvValue("EXCLUDE_NAMESPACES", &c.ExcludeNamespaces) + if err != nil { + return fmt.Errorf("error while getting EXCLUDE_NAMESPACES environment variable: %w", err) + } + + err = GetEnvValue("EXCLUDE_DEPLOYMENTS", &c.ExcludeWorkloads) + if err != nil { + return fmt.Errorf("error while getting EXCLUDE_DEPLOYMENTS environment variable: %w", err) + } + + return nil +} diff --git a/internal/pkg/values/durationValue.go b/internal/pkg/util/durationValue.go similarity index 78% rename from internal/pkg/values/durationValue.go rename to internal/pkg/util/durationValue.go index cbca185..39d8cd7 100644 --- a/internal/pkg/values/durationValue.go +++ b/internal/pkg/util/durationValue.go @@ -1,4 +1,4 @@ -package values +package util import ( "fmt" @@ -6,10 +6,10 @@ import ( "time" ) -// DurationValue is an alias for time.DurationValue with a Set function that allows for durations without a unit +// DurationValue is an alias for time.Duration with a Set function that allows for durations without a unit.
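A runnable sketch of the parsing rule DurationValue.Set implements below: a bare integer is read as seconds, anything else has to parse as a regular Go duration string:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseDuration mirrors the behavior of DurationValue.Set: try integer
// seconds first, then fall back to time.ParseDuration.
func parseDuration(value string) (time.Duration, error) {
	if seconds, err := strconv.Atoi(value); err == nil {
		return time.Duration(seconds) * time.Second, nil
	}
	d, err := time.ParseDuration(value)
	if err != nil {
		return 0, fmt.Errorf("failed to parse duration: %w", err)
	}
	return d, nil
}

func main() {
	for _, v := range []string{"30", "30s", "1h30m"} {
		d, _ := parseDuration(v)
		fmt.Println(v, "=>", d) // 30 => 30s, 30s => 30s, 1h30m => 1h30m0s
	}
}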
type DurationValue time.Duration -// Set converts the string value into a duration +// Set converts the string value into a duration. func (d *DurationValue) Set(value string) error { // try parsing as integer seconds seconds, err := strconv.Atoi(value) @@ -25,9 +25,10 @@ func (d *DurationValue) Set(value string) error { } *d = DurationValue(duration) + return nil } func (d *DurationValue) String() string { - return fmt.Sprint(time.Duration(*d).String()) + return time.Duration(*d).String() } diff --git a/internal/pkg/util/env.go b/internal/pkg/util/env.go new file mode 100644 index 0000000..746ae96 --- /dev/null +++ b/internal/pkg/util/env.go @@ -0,0 +1,19 @@ +package util + +import ( + "flag" + "fmt" + "os" +) + +// GetEnvValue gets the env value and puts it in flag.Value. +func GetEnvValue(key string, value flag.Value) error { + if val, ok := os.LookupEnv(key); ok { + err := value.Set(val) + if err != nil { + return fmt.Errorf("failed to set value: %w", err) + } + } + + return nil +} diff --git a/internal/pkg/values/int32Value.go b/internal/pkg/util/int32Value.go similarity index 95% rename from internal/pkg/values/int32Value.go rename to internal/pkg/util/int32Value.go index a4c3cc4..f0eec43 100644 --- a/internal/pkg/values/int32Value.go +++ b/internal/pkg/util/int32Value.go @@ -1,4 +1,4 @@ -package values +package util import ( "fmt" @@ -14,6 +14,7 @@ func (i *Int32Value) Set(s string) error { } // #nosec G115 *i = Int32Value(v) + return nil } diff --git a/internal/pkg/values/regexList.go b/internal/pkg/util/regexList.go similarity index 97% rename from internal/pkg/values/regexList.go rename to internal/pkg/util/regexList.go index 55e724c..2ba7c14 100644 --- a/internal/pkg/values/regexList.go +++ b/internal/pkg/util/regexList.go @@ -1,4 +1,4 @@ -package values +package util import ( "fmt" @@ -11,14 +11,18 @@ type RegexList []*regexp.Regexp func (r *RegexList) Set(text string) error { entries := strings.Split(text, ",") *r = make(RegexList, 0, len(entries)) + for _, entry := range entries { entry = strings.TrimSpace(entry) + re, err := regexp.Compile(entry) if err != nil { return fmt.Errorf("failed to compile stringlist entry as a regex: %w", err) } + *r = append(*r, re) } + return nil } @@ -32,5 +36,6 @@ func (r RegexList) CheckMatchesAny(text string) bool { return true } } + return false } diff --git a/internal/pkg/util/resourceLogger.go b/internal/pkg/util/resourceLogger.go new file mode 100644 index 0000000..9c324a5 --- /dev/null +++ b/internal/pkg/util/resourceLogger.go @@ -0,0 +1,10 @@ +package util + +import "context" + +type ResourceLogger interface { + // ErrorInvalidAnnotation adds an invalid annotation error on a resource + ErrorInvalidAnnotation(id string, message string, ctx context.Context) + // ErrorIncompatibleFields adds an incompatible fields error on a resource + ErrorIncompatibleFields(message string, ctx context.Context) +} diff --git a/internal/pkg/values/stringlistValue.go b/internal/pkg/util/stringlistValue.go similarity index 78% rename from internal/pkg/values/stringlistValue.go rename to internal/pkg/util/stringlistValue.go index d463641..9809df0 100644 --- a/internal/pkg/values/stringlistValue.go +++ b/internal/pkg/util/stringlistValue.go @@ -1,19 +1,21 @@ -package values +package util import ( "fmt" "strings" ) -// StringListValue is an alias for []string with a Set funciton for the flag package +// StringListValue is an alias for []string with a Set function for the flag package. 
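How the flag.Value plumbing lets cli flags and environment variables share a single parser, sketched with a reduced stringList (the real StringListValue resets the slice in Set; this version only appends):

package main

import (
	"flag"
	"fmt"
	"os"
	"strings"
)

// stringList is a reduced version of util.StringListValue.
type stringList []string

func (s *stringList) Set(text string) error {
	for _, entry := range strings.Split(text, ",") {
		*s = append(*s, strings.TrimSpace(entry))
	}
	return nil
}

func (s *stringList) String() string { return strings.Join(*s, ",") }

// getEnvValue mirrors util.GetEnvValue: reuse the flag.Value parser for
// environment variables so both configuration paths behave identically.
func getEnvValue(key string, value flag.Value) error {
	if val, ok := os.LookupEnv(key); ok {
		if err := value.Set(val); err != nil {
			return fmt.Errorf("failed to set value: %w", err)
		}
	}
	return nil
}

func main() {
	_ = os.Setenv("EXCLUDE_NAMESPACES", "kube-system, kube-downscaler")
	var excluded stringList
	_ = getEnvValue("EXCLUDE_NAMESPACES", &excluded)
	fmt.Println(excluded) // [kube-system kube-downscaler]
}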
type StringListValue []string func (s *StringListValue) Set(text string) error { entries := strings.Split(text, ",") *s = make(StringListValue, 0, len(entries)) + for _, entry := range entries { *s = append(*s, strings.TrimSpace(entry)) } + return nil } diff --git a/internal/pkg/values/layer.go b/internal/pkg/values/layer.go index d01f507..8855db0 100644 --- a/internal/pkg/values/layer.go +++ b/internal/pkg/values/layer.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" "time" + + "github.com/caas-team/gokubedownscaler/internal/pkg/util" ) var ( @@ -18,17 +20,17 @@ var ( const Undefined = -1 // Undefined represents an undefined integer value -// scaling is an enum that describes the current scaling -type scaling int +// Scaling is an enum that describes the current scaling state. +type Scaling int const ( - ScalingNone scaling = iota // no scaling set in this layer, go to next layer + ScalingNone Scaling = iota // no scaling set in this layer, go to next layer ScalingIgnore // not scaling ScalingDown // scaling down ScalingUp // scaling up ) -// NewLayer gets a new layer with the default values +// NewLayer gets a new layer with the default values. func NewLayer() Layer { return Layer{ DownscaleReplicas: Undefined, @@ -36,7 +38,7 @@ func NewLayer() Layer { } } -// Layer represents a value Layer +// Layer represents a value Layer. type Layer struct { DownscalePeriod timeSpans // periods to downscale in DownTime timeSpans // within these timespans workloads will be scaled down, outside of them they will be scaled up @@ -50,19 +52,21 @@ type Layer struct { GracePeriod time.Duration // grace period until new workloads will be scaled down } -// isScalingExcluded checks if scaling is excluded, nil represents a not set state -func (l Layer) isScalingExcluded() *bool { +// isScalingExcluded checks if scaling is excluded, nil represents an unset state. +func (l *Layer) isScalingExcluded() *bool { if l.Exclude.isSet { return &l.Exclude.value } + if ok := l.ExcludeUntil.After(time.Now()); ok { return &ok } + return nil } -// CheckForIncompatibleFields checks if there are incompatible fields -func (l Layer) CheckForIncompatibleFields() error { +// CheckForIncompatibleFields checks if there are incompatible fields. +func (l *Layer) CheckForIncompatibleFields() error { //nolint: cyclop // this is still fine to read, we could definitely consider refactoring this in the future // force down and uptime if l.ForceDowntime.isSet && l.ForceDowntime.value && @@ -83,22 +87,26 @@ func (l Layer) CheckForIncompatibleFields() error { (l.UpscalePeriod != nil || l.DownscalePeriod != nil) { return errTimeAndPeriod } + return nil } -// getCurrentScaling gets the current scaling, not checking for incompatibility -func (l Layer) getCurrentScaling() scaling { +// getCurrentScaling gets the current scaling, not checking for incompatibility.
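A condensed sketch of the DownTime branch of getCurrentScaling below: inside the configured window the layer answers scale down, outside it scale up (the timeSpans logic is reduced to a single absolute window):

package main

import (
	"fmt"
	"time"
)

// inWindow is a simplified stand-in for timeSpans.inTimeSpans.
func inWindow(from, to time.Time) bool {
	now := time.Now()
	return now.After(from) && now.Before(to)
}

// currentScaling mirrors the DownTime branch of Layer.getCurrentScaling:
// inside the downtime window we scale down, outside it we scale up.
func currentScaling(downFrom, downTo time.Time) string {
	if inWindow(downFrom, downTo) {
		return "down"
	}
	return "up"
}

func main() {
	// downtime window covering the current moment:
	fmt.Println(currentScaling(time.Now().Add(-time.Hour), time.Now().Add(time.Hour))) // down
}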
+func (l *Layer) getCurrentScaling() Scaling { // check times if l.DownTime != nil { if l.DownTime.inTimeSpans() { return ScalingDown } + return ScalingUp } + if l.UpTime != nil { if l.UpTime.inTimeSpans() { return ScalingUp } + return ScalingDown } @@ -107,31 +115,36 @@ func (l Layer) getCurrentScaling() scaling { if l.DownscalePeriod.inTimeSpans() { return ScalingDown } + if l.UpscalePeriod.inTimeSpans() { return ScalingUp } + return ScalingIgnore } return ScalingNone } -// getForcedScaling checks if the layer has forced scaling enabled and returns the matching scaling -func (l Layer) getForcedScaling() scaling { - var forcedScaling scaling +// getForcedScaling checks if the layer has forced scaling enabled and returns the matching scaling. +func (l *Layer) getForcedScaling() Scaling { + var forcedScaling Scaling + if l.ForceDowntime.isSet && l.ForceDowntime.value { forcedScaling = ScalingDown } + if l.ForceUptime.isSet && l.ForceUptime.value { forcedScaling = ScalingUp } + return forcedScaling } -type Layers []Layer +type Layers []*Layer -// GetCurrentScaling gets the current scaling of the first layer that implements scaling -func (l Layers) GetCurrentScaling() scaling { +// GetCurrentScaling gets the current scaling of the first layer that implements scaling. +func (l Layers) GetCurrentScaling() Scaling { // check for forced scaling for _, layer := range l { forcedScaling := layer.getForcedScaling() @@ -145,13 +158,14 @@ func (l Layers) GetCurrentScaling() scaling { if layerScaling == ScalingNone { continue } + return layerScaling } return ScalingNone } -// GetDownscaleReplicas gets the downscale replicas of the first layer that implements downscale replicas +// GetDownscaleReplicas gets the downscale replicas of the first layer that implements downscale replicas. func (l Layers) GetDownscaleReplicas() (int32, error) { for _, layer := range l { downscaleReplicas := layer.DownscaleReplicas @@ -161,10 +175,11 @@ func (l Layers) GetDownscaleReplicas() (int32, error) { return downscaleReplicas, nil } + return 0, errValueNotSet } -// GetExcluded checks if any layer excludes scaling +// GetExcluded checks if any layer excludes scaling. func (l Layers) GetExcluded() bool { for _, layer := range l { excluded := layer.isScalingExcluded() @@ -174,19 +189,31 @@ func (l Layers) GetExcluded() bool { return *excluded } + return false } -// IsInGracePeriod gets the grace period of the uppermost layer that has it set -func (l Layers) IsInGracePeriod(timeAnnotation string, workloadAnnotations map[string]string, creationTime time.Time, logEvent resourceLogger, ctx context.Context) (bool, error) { +// IsInGracePeriod gets the grace period of the uppermost layer that has it set. 
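The grace-period check at its core, as a small sketch: a workload is left untouched until gracePeriod has elapsed since its reference time, which is the creation timestamp or, when configured, the time annotation:

package main

import (
	"fmt"
	"time"
)

// inGracePeriod mirrors the final check of Layers.IsInGracePeriod: the
// workload is protected while now is before referenceTime + gracePeriod.
func inGracePeriod(referenceTime time.Time, gracePeriod time.Duration) bool {
	return time.Now().Before(referenceTime.Add(gracePeriod))
}

func main() {
	created := time.Now().Add(-10 * time.Minute)
	fmt.Println(inGracePeriod(created, 15*time.Minute)) // true: still protected
	fmt.Println(inGracePeriod(created, 5*time.Minute))  // false: eligible for downscaling
}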
+func (l Layers) IsInGracePeriod( + timeAnnotation string, + workloadAnnotations map[string]string, + creationTime time.Time, + logEvent util.ResourceLogger, + ctx context.Context, +) (bool, error) { + var err error var gracePeriod time.Duration = Undefined + for _, layer := range l { if layer.GracePeriod == Undefined { continue } + gracePeriod = layer.GracePeriod + break } + if gracePeriod == Undefined { return false, nil } @@ -197,13 +224,17 @@ func (l Layers) IsInGracePeriod(timeAnnotation string, workloadAnnotations map[s logEvent.ErrorInvalidAnnotation(timeAnnotation, fmt.Sprintf("annotation %q not present on this workload", timeAnnotation), ctx) return false, errAnnotationNotSet } - var err error + creationTime, err = time.Parse(time.RFC3339, timeString) if err != nil { - logEvent.ErrorInvalidAnnotation(timeAnnotation, fmt.Sprintf("failed to parse %q annotation as RFC3339 timestamp: %s", timeAnnotation, err.Error()), ctx) - return false, fmt.Errorf("failed to parse timestamp in annotation: %w", err) + err = fmt.Errorf("failed to parse %q annotation as RFC3339 timestamp: %w", timeAnnotation, err) + logEvent.ErrorInvalidAnnotation(timeAnnotation, err.Error(), ctx) + + return false, err } } - gracePeriodUntil := creationTime.Add(time.Duration(gracePeriod)) + + gracePeriodUntil := creationTime.Add(gracePeriod) + return time.Now().Before(gracePeriodUntil), nil } diff --git a/internal/pkg/values/layerParser.go b/internal/pkg/values/layerParser.go new file mode 100644 index 0000000..cfbcf34 --- /dev/null +++ b/internal/pkg/values/layerParser.go @@ -0,0 +1,224 @@ +package values + +import ( + "context" + "flag" + "fmt" + "strconv" + "time" + + "github.com/caas-team/gokubedownscaler/internal/pkg/util" +) + +const ( + annotationDownscalePeriod = "downscaler/downscale-period" + annotationDowntime = "downscaler/downtime" + annotationUpscalePeriod = "downscaler/upscale-period" + annotationUptime = "downscaler/uptime" + annotationExclude = "downscaler/exclude" + annotationExcludeUntil = "downscaler/exclude-until" + annotationForceUptime = "downscaler/force-uptime" + annotationForceDowntime = "downscaler/force-downtime" + annotationDownscaleReplicas = "downscaler/downscale-replicas" + annotationGracePeriod = "downscaler/grace-period" + + envUpscalePeriod = "UPSCALE_PERIOD" + envUptime = "DEFAULT_UPTIME" + envDownscalePeriod = "DOWNSCALE_PERIOD" + envDowntime = "DEFAULT_DOWNTIME" +) + +// ParseLayerFlags sets all flags corresponding to layer values to fill into l. +func (l *Layer) ParseLayerFlags() { + flag.Var( + &l.DownscalePeriod, + "downscale-period", + "period to scale down in (default: never, incompatible: UpscaleTime, DownscaleTime)", + ) + flag.Var( + &l.DownTime, + "default-downtime", + `timespans where workloads will be scaled down. + outside of them they will be scaled up. + (default: never, incompatible: UpscalePeriod, DownscalePeriod)`, + ) + flag.Var( + &l.UpscalePeriod, + "upscale-period", + "periods to scale up in (default: never, incompatible: UpscaleTime, DownscaleTime)", + ) + flag.Var( + &l.UpTime, + "default-uptime", + `timespans where workloads will be scaled up. + outside of them they will be scaled down. 
+ (default: never, incompatible: UpscalePeriod, DownscalePeriod)`, + ) + flag.Var( + &l.Exclude, + "explicit-include", + "sets exclude on the cli layer to true, so namespaces or deployments have to explicitly specify downscaler/exclude=false (default: false)", + ) + flag.Var( + (*util.Int32Value)(&l.DownscaleReplicas), + "downtime-replicas", + "the replicas to scale down to (default: 0)", + ) + flag.Var( + (*util.DurationValue)(&l.GracePeriod), + "grace-period", + "the grace period from the creation of a workload until its first downscale (default: 15min)", + ) +} + +// GetLayerFromEnv fills l with all values from environment variables and checks for compatibility. +func (l *Layer) GetLayerFromEnv() error { + err := util.GetEnvValue(envUpscalePeriod, &l.UpscalePeriod) + if err != nil { + return fmt.Errorf("error while getting %q environment variable: %w", envUpscalePeriod, err) + } + + err = util.GetEnvValue(envUptime, &l.UpTime) + if err != nil { + return fmt.Errorf("error while getting %q environment variable: %w", envUptime, err) + } + + err = util.GetEnvValue(envDownscalePeriod, &l.DownscalePeriod) + if err != nil { + return fmt.Errorf("error while getting %q environment variable: %w", envDownscalePeriod, err) + } + + err = util.GetEnvValue(envDowntime, &l.DownTime) + if err != nil { + return fmt.Errorf("error while getting %q environment variable: %w", envDowntime, err) + } + + if err = l.CheckForIncompatibleFields(); err != nil { + return fmt.Errorf("error: found incompatible fields: %w", err) + } + + return nil +} + +// GetLayerFromAnnotations fills l with all values from the annotations and checks for compatibility. +func (l *Layer) GetLayerFromAnnotations( //nolint: funlen,gocognit,gocyclo,cyclop // it is a big function and we can refactor it a bit but it should be fine for now + annotations map[string]string, + logEvent util.ResourceLogger, + ctx context.Context, +) error { + var err error + + if downscalePeriod, ok := annotations[annotationDownscalePeriod]; ok { + err = l.DownscalePeriod.Set(downscalePeriod) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationDownscalePeriod, err) + logEvent.ErrorInvalidAnnotation(annotationDownscalePeriod, err.Error(), ctx) + + return err + } + } + + if downtime, ok := annotations[annotationDowntime]; ok { + err = l.DownTime.Set(downtime) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationDowntime, err) + logEvent.ErrorInvalidAnnotation(annotationDowntime, err.Error(), ctx) + + return err + } + } + + if upscalePeriod, ok := annotations[annotationUpscalePeriod]; ok { + err = l.UpscalePeriod.Set(upscalePeriod) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationUpscalePeriod, err) + logEvent.ErrorInvalidAnnotation(annotationUpscalePeriod, err.Error(), ctx) + + return err + } + } + + if uptime, ok := annotations[annotationUptime]; ok { + err = l.UpTime.Set(uptime) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationUptime, err) + logEvent.ErrorInvalidAnnotation(annotationUptime, err.Error(), ctx) + + return err + } + } + + if exclude, ok := annotations[annotationExclude]; ok { + err = l.Exclude.Set(exclude) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationExclude, err) + logEvent.ErrorInvalidAnnotation(annotationExclude, err.Error(), ctx) + + return err + } + } + + if excludeUntil, ok := annotations[annotationExcludeUntil]; ok
{ + l.ExcludeUntil, err = time.Parse(time.RFC3339, excludeUntil) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationExcludeUntil, err) + logEvent.ErrorInvalidAnnotation(annotationExcludeUntil, err.Error(), ctx) + + return err + } + } + + if forceUptime, ok := annotations[annotationForceUptime]; ok { + err = l.ForceUptime.Set(forceUptime) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationForceUptime, err) + logEvent.ErrorInvalidAnnotation(annotationForceUptime, err.Error(), ctx) + + return err + } + } + + if forceDowntime, ok := annotations[annotationForceDowntime]; ok { + err = l.ForceDowntime.Set(forceDowntime) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationForceDowntime, err) + logEvent.ErrorInvalidAnnotation(annotationForceDowntime, err.Error(), ctx) + + return err + } + } + + if downscaleReplicasString, ok := annotations[annotationDownscaleReplicas]; ok { + var downscaleReplicas int64 + + downscaleReplicas, err = strconv.ParseInt(downscaleReplicasString, 10, 32) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationDownscaleReplicas, err) + logEvent.ErrorInvalidAnnotation(annotationDownscaleReplicas, err.Error(), ctx) + + return err + } + + // #nosec G115 // downscaleReplicas gets parsed as a 32 bit integer, so any errors that could be thrown here are already handled above + l.DownscaleReplicas = int32(downscaleReplicas) + } + + if gracePeriod, ok := annotations[annotationGracePeriod]; ok { + err = (*util.DurationValue)(&l.GracePeriod).Set(gracePeriod) + if err != nil { + err = fmt.Errorf("failed to parse %q annotation: %w", annotationGracePeriod, err) + logEvent.ErrorInvalidAnnotation(annotationGracePeriod, err.Error(), ctx) + + return err + } + } + + if err = l.CheckForIncompatibleFields(); err != nil { + err = fmt.Errorf("error: found incompatible fields: %w", err) + logEvent.ErrorIncompatibleFields(err.Error(), ctx) + + return err + } + + return nil +} diff --git a/internal/pkg/values/layer_test.go b/internal/pkg/values/layer_test.go index bb7e3fe..150e8ad 100644 --- a/internal/pkg/values/layer_test.go +++ b/internal/pkg/values/layer_test.go @@ -8,6 +8,8 @@ import ( ) func TestLayer_checkForIncompatibleFields(t *testing.T) { + t.Parallel() + tests := []struct { name string layer Layer @@ -80,6 +82,8 @@ func TestLayer_checkForIncompatibleFields(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + err := test.layer.CheckForIncompatibleFields() if test.wantErr { assert.Error(t, err) @@ -91,6 +95,7 @@ func TestLayer_checkForIncompatibleFields(t *testing.T) { } func TestLayer_getCurrentScaling(t *testing.T) { + t.Parallel() var ( inTimeSpan = timeSpans{absoluteTimeSpan{ from: time.Now().Add(-time.Hour), @@ -105,7 +110,7 @@ func TestLayer_getCurrentScaling(t *testing.T) { tests := []struct { name string layer Layer - wantScaling scaling + wantScaling Scaling }{ { name: "in downtime", @@ -172,6 +177,8 @@ func TestLayer_getCurrentScaling(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + scaling := test.layer.getCurrentScaling() assert.Equal(t, test.wantScaling, scaling) }) diff --git a/internal/pkg/values/timespan.go b/internal/pkg/values/timespan.go index 187ec82..4a90b59 100644 --- a/internal/pkg/values/timespan.go +++ b/internal/pkg/values/timespan.go @@ -15,33 +15,36 @@ var ( errTimeOfDayOutOfRange = errors.New("error: the time of day has fields that are out 
of range")
)

-// rfc339Regex is a regex that matches an rfc339 timestamp
+// rfc3339Regex is a regex that matches an rfc3339 timestamp.
const rfc3339Regex = `(.+Z|.+[+-]\d{2}:\d{2})`

-// absoluteTimeSpanRegex matches a absolute timespan. It's groups are the two rfc3339 timestamps
+// absoluteTimeSpanRegex matches an absolute timespan. Its groups are the two rfc3339 timestamps.
var absoluteTimeSpanRegex = regexp.MustCompile(fmt.Sprintf(`^%s *- *%s$`, rfc3339Regex, rfc3339Regex))

 type TimeSpan interface {
-	// inTimeSpan checks if time is in the timespan or not
-	isTimeInSpan(time.Time) bool
+	// isTimeInSpan checks if time is in the timespan or not
+	isTimeInSpan(time time.Time) bool
 }

 type timeSpans []TimeSpan

-// inTimeSpans checks if current time is in one of the timespans or not
+// inTimeSpans checks if the current time is in one of the timespans or not.
 func (t *timeSpans) inTimeSpans() bool {
 	for _, timespan := range *t {
 		if !timespan.isTimeInSpan(time.Now()) {
 			continue
 		}
+
 		return true
 	}
+
 	return false
 }

 func (t *timeSpans) Set(value string) error {
 	spans := strings.Split(value, ",")
 	timespans := make([]TimeSpan, 0, len(spans))
+
 	for _, timespanText := range spans {
 		timespanText = strings.TrimSpace(timespanText)

@@ -51,7 +54,9 @@ func (t *timeSpans) Set(value string) error {
 			if err != nil {
 				return fmt.Errorf("failed to parse absolute timespan: %w", err)
 			}
+
 			timespans = append(timespans, timespan)
+
 			continue
 		}

@@ -60,9 +65,12 @@ func (t *timeSpans) Set(value string) error {
 		if err != nil {
 			return fmt.Errorf("failed to parse relative timespan: %w", err)
 		}
+
 		timespans = append(timespans, timespan)
 	}
+
 	*t = timeSpans(timespans)
+
 	return nil
 }

@@ -70,7 +78,7 @@ func (t *timeSpans) String() string {
 	return fmt.Sprint(*t)
 }

-// parseAbsoluteTimespans parses an absolute timespan. will panic if timespan is not an absolute timespan
+// parseAbsoluteTimeSpan parses an absolute timespan. Will panic if timespan is not an absolute timespan.
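+// For example (an input shape taken from the tests below):
+// "2024-02-27T00:00:00Z - 2024-02-29T00:00:00Z" parses into an absoluteTimeSpan
+// with the first timestamp as from and the second as to.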
func parseAbsoluteTimeSpan(timespan string) (absoluteTimeSpan, error) { timestamps := absoluteTimeSpanRegex.FindStringSubmatch(timespan)[1:] @@ -78,6 +86,7 @@ func parseAbsoluteTimeSpan(timespan string) (absoluteTimeSpan, error) { if err != nil { return absoluteTimeSpan{}, fmt.Errorf("failed to parse rfc3339 timestamp: %w", err) } + toTime, err := time.Parse(time.RFC3339, strings.TrimSpace(timestamps[1])) if err != nil { return absoluteTimeSpan{}, fmt.Errorf("failed to parse rfc3339 timestamp: %w", err) @@ -90,6 +99,7 @@ func parseAbsoluteTimeSpan(timespan string) (absoluteTimeSpan, error) { } func parseRelativeTimeSpan(timespanString string) (*relativeTimeSpan, error) { + var err error timespan := relativeTimeSpan{} parts := strings.Split(timespanString, " ") @@ -101,13 +111,14 @@ func parseRelativeTimeSpan(timespanString string) (*relativeTimeSpan, error) { if len(weekdaySpan) != 2 { return nil, errRelativeTimespanInvalid } + timeSpan := strings.Split(parts[1], "-") if len(timeSpan) != 2 { return nil, errRelativeTimespanInvalid } + timezone := parts[2] - var err error timespan.timezone, err = time.LoadLocation(timezone) if err != nil { return nil, fmt.Errorf("failed to parse timezone: %w", err) @@ -117,6 +128,7 @@ func parseRelativeTimeSpan(timespanString string) (*relativeTimeSpan, error) { if err != nil { return nil, fmt.Errorf("failed to parse time of day from: %w", err) } + timespan.timeTo, err = parseDayTime(timeSpan[1], timespan.timezone) if err != nil { return nil, fmt.Errorf("failed to parse time of day to: %w", err) @@ -126,6 +138,7 @@ func parseRelativeTimeSpan(timespanString string) (*relativeTimeSpan, error) { if err != nil { return nil, fmt.Errorf("failed to parse 'weekdayFrom': %w", err) } + timespan.weekdayTo, err = getWeekday(weekdaySpan[1]) if err != nil { return nil, fmt.Errorf("failed to parse 'weekdayTo': %w", err) @@ -142,27 +155,30 @@ type relativeTimeSpan struct { timeTo time.Time } -// isWeekdayInRange checks if the weekday falls into the weekday range +// isWeekdayInRange checks if the weekday falls into the weekday range. func (t relativeTimeSpan) isWeekdayInRange(weekday time.Weekday) bool { if t.weekdayFrom <= t.weekdayTo { // check if range wraps across weeks return weekday >= t.weekdayFrom && weekday <= t.weekdayTo } + return weekday >= t.weekdayFrom || weekday <= t.weekdayTo } -// isTimeOfDayInRange checks if the time falls into the time of day range +// isTimeOfDayInRange checks if the time falls into the time of day range. func (t relativeTimeSpan) isTimeOfDayInRange(timeOfDay time.Time) bool { if t.timeFrom.After(t.timeTo) { // check if range wraps across days return timeOfDay.After(t.timeFrom) || timeOfDay.Equal(t.timeFrom) || timeOfDay.Before(t.timeTo) } + return (t.timeFrom.Before(timeOfDay) || t.timeFrom.Equal(timeOfDay)) && t.timeTo.After(timeOfDay) } -// isTimeInSpan check if the time is in the span +// isTimeInSpan check if the time is in the span. func (t relativeTimeSpan) isTimeInSpan(targetTime time.Time) bool { targetTime = targetTime.In(t.timezone) timeOfDay := getTimeOfDay(targetTime) weekday := targetTime.Weekday() + return t.isTimeOfDayInRange(timeOfDay) && t.isWeekdayInRange(weekday) } @@ -171,17 +187,17 @@ type absoluteTimeSpan struct { to time.Time } -// isTimeInSpan check if the time is in the span +// isTimeInSpan check if the time is in the span. 
func (t absoluteTimeSpan) isTimeInSpan(targetTime time.Time) bool { return (t.from.Before(targetTime) || t.from.Equal(targetTime)) && t.to.After(targetTime) } -// isAbsoluteTimestamp checks if timestamp string is absolute +// isAbsoluteTimestamp checks if timestamp string is absolute. func isAbsoluteTimestamp(timestamp string) bool { return absoluteTimeSpanRegex.MatchString(timestamp) } -// getWeekday gets the weekday from the given string +// getWeekday gets the weekday from the given string. func getWeekday(weekday string) (time.Weekday, error) { weekdays := map[string]time.Weekday{ "sun": time.Sunday, @@ -200,27 +216,32 @@ func getWeekday(weekday string) (time.Weekday, error) { return 0, errInvalidWeekday } -// parseDayTime parses the given time of day string to a zero date time +// parseDayTime parses the given time of day string to a zero date time. func parseDayTime(daytime string, timezone *time.Location) (time.Time, error) { parts := strings.Split(daytime, ":") + hour, err := strconv.Atoi(parts[0]) if err != nil { return time.Time{}, fmt.Errorf("failed to parse hour of daytime: %w", err) } + if hour < 0 || hour > 24 { return time.Time{}, errTimeOfDayOutOfRange } + minute, err := strconv.Atoi(parts[1]) if err != nil { return time.Time{}, fmt.Errorf("failed to parse minute of daytime: %w", err) } + if minute < 0 || minute >= 60 { return time.Time{}, errTimeOfDayOutOfRange } + return time.Date(0, time.January, 1, hour, minute, 0, 0, timezone), nil } -// getTimeOfDay gets the time of day of the given time +// getTimeOfDay gets the time of day of the given time. func getTimeOfDay(targetTime time.Time) time.Time { return time.Date(0, time.January, 1, targetTime.Hour(), diff --git a/internal/pkg/values/timespan_test.go b/internal/pkg/values/timespan_test.go index dec8e4e..e846286 100644 --- a/internal/pkg/values/timespan_test.go +++ b/internal/pkg/values/timespan_test.go @@ -6,11 +6,14 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var zeroTime = time.Date(0, time.January, 1, 0, 0, 0, 0, time.UTC) func TestParseRelativeTimeSpan(t *testing.T) { + t.Parallel() + tests := []struct { name string timespanString string @@ -81,18 +84,23 @@ func TestParseRelativeTimeSpan(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + gotResult, gotErr := parseRelativeTimeSpan(test.timespanString) if test.wantErr { - assert.Error(t, gotErr) + require.Error(t, gotErr) } else { - assert.NoError(t, gotErr) + require.NoError(t, gotErr) } + assert.Equal(t, test.wantResult, gotResult) }) } } func TestRelativeTimeSpan_isWeekdayInRange(t *testing.T) { + t.Parallel() + tests := []struct { name string timespan relativeTimeSpan @@ -139,6 +147,8 @@ func TestRelativeTimeSpan_isWeekdayInRange(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + gotResult := test.timespan.isWeekdayInRange(test.weekday) assert.Equal(t, test.wantResult, gotResult) }) @@ -146,6 +156,8 @@ func TestRelativeTimeSpan_isWeekdayInRange(t *testing.T) { } func TestRelativeTimeSpan_isTimeOfDayInRange(t *testing.T) { + t.Parallel() + tests := []struct { name string timespan relativeTimeSpan @@ -234,6 +246,8 @@ func TestRelativeTimeSpan_isTimeOfDayInRange(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + gotResult := test.timespan.isTimeOfDayInRange(test.timeOfDay) assert.Equal(t, test.wantResult, gotResult) }) @@ -241,6 +255,8 @@ func 
TestRelativeTimeSpan_isTimeOfDayInRange(t *testing.T) { } func TestGetTimeOfDay(t *testing.T) { + t.Parallel() + tests := []struct { name string time time.Time @@ -260,6 +276,8 @@ func TestGetTimeOfDay(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + gotResult := getTimeOfDay(test.time) assert.Equal(t, test.wantResult, gotResult) }) @@ -267,6 +285,8 @@ func TestGetTimeOfDay(t *testing.T) { } func TestAbsoluteTimeSpan_isTimeInSpan(t *testing.T) { + t.Parallel() + tests := []struct { name string timespan absoluteTimeSpan @@ -313,6 +333,8 @@ func TestAbsoluteTimeSpan_isTimeInSpan(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + gotResult := test.timespan.isTimeInSpan(test.time) assert.Equal(t, test.wantResult, gotResult) }) @@ -320,6 +342,8 @@ func TestAbsoluteTimeSpan_isTimeInSpan(t *testing.T) { } func TestParseAbsoluteTimeSpan(t *testing.T) { + t.Parallel() + time1 := time.Date(2024, time.February, 27, 0, 0, 0, 0, time.UTC) time2 := time1.Add(48 * time.Hour) @@ -357,18 +381,24 @@ func TestParseAbsoluteTimeSpan(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + gotResult, gotErr := parseAbsoluteTimeSpan(test.timespanString) + if test.wantErr { - assert.Error(t, gotErr) + require.Error(t, gotErr) } else { - assert.NoError(t, gotErr) + require.NoError(t, gotErr) } + assert.Equal(t, test.wantResult, gotResult) }) } } func TestIsAbsoluteTimestamp(t *testing.T) { + t.Parallel() + time1 := time.Date(2024, time.February, 27, 0, 0, 0, 0, time.UTC) time2 := time1.Add(48 * time.Hour) @@ -401,6 +431,8 @@ func TestIsAbsoluteTimestamp(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + gotResult := isAbsoluteTimestamp(test.timespanString) assert.Equal(t, test.wantResult, gotResult) }) diff --git a/internal/pkg/values/triStateBool.go b/internal/pkg/values/triStateBool.go index 2ba2ddb..bf82614 100644 --- a/internal/pkg/values/triStateBool.go +++ b/internal/pkg/values/triStateBool.go @@ -5,20 +5,23 @@ import ( "strconv" ) -// triStateBool represents a boolean with an additional isSet field +// triStateBool represents a boolean with an additional isSet field. type triStateBool struct { isSet bool value bool } -// Set sets the value and sets isSet to true +// Set sets the value and sets isSet to true. func (t *triStateBool) Set(value string) error { var err error + t.value, err = strconv.ParseBool(value) if err != nil { return fmt.Errorf("failed to parse boolean value: %w", err) } + t.isSet = true + return nil } @@ -26,8 +29,10 @@ func (t *triStateBool) String() string { if !t.isSet { return "undefined" } - return fmt.Sprint(t.value) + + return strconv.FormatBool(t.value) } -// IsBoolFlag is there to make triStateBool implement flag.boolFlag, which lets the cli argument not need a value resulting in a "true" value -func (b *triStateBool) IsBoolFlag() bool { return true } +// IsBoolFlag is there to make triStateBool implement flag.boolFlag. +// This lets users use the flag without needing to specify a value. 
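+// For example, with a flag bound to a triStateBool (such as "explicit-include"
+// in the values layer):
+//
+//	--explicit-include        is parsed as "true"
+//	--explicit-include=false  still sets an explicit value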
+func (t *triStateBool) IsBoolFlag() bool { return true } diff --git a/internal/pkg/values/util.go b/internal/pkg/values/util.go deleted file mode 100644 index 0bb4ca0..0000000 --- a/internal/pkg/values/util.go +++ /dev/null @@ -1,159 +0,0 @@ -package values - -import ( - "context" - "flag" - "fmt" - "os" - "strconv" - "time" -) - -const ( - annotationDownscalePeriod = "downscaler/downscale-period" - annotationDowntime = "downscaler/downtime" - annotationUpscalePeriod = "downscaler/upscale-period" - annotationUptime = "downscaler/uptime" - annotationExclude = "downscaler/exclude" - annotationExcludeUntil = "downscaler/exclude-until" - annotationForceUptime = "downscaler/force-uptime" - annotationForceDowntime = "downscaler/force-downtime" - annotationDownscaleReplicas = "downscaler/downscale-replicas" - annotationGracePeriod = "downscaler/grace-period" - - envUpscalePeriod = "UPSCALE_PERIOD" - envUptime = "DEFAULT_UPTIME" - envDownscalePeriod = "DOWNSCALE_PERIOD" - envDowntime = "DEFAULT_DOWNTIME" -) - -type resourceLogger interface { - // ErrorInvalidAnnotation adds an invalid annotation error on a resource - ErrorInvalidAnnotation(id string, message string, ctx context.Context) - // ErrorIncompatibleFields adds an incompatible fields error on a resource - ErrorIncompatibleFields(message string, ctx context.Context) -} - -// GetLayerFromAnnotations makes a layer and fills it with all values from the annotations -func GetLayerFromAnnotations(annotations map[string]string, logEvent resourceLogger, ctx context.Context) (Layer, error) { - result := NewLayer() - var err error - - if downscalePeriod, ok := annotations[annotationDownscalePeriod]; ok { - err = result.DownscalePeriod.Set(downscalePeriod) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationDownscalePeriod, fmt.Sprintf("failed to parse %q annotation: %s", annotationDownscalePeriod, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationDownscalePeriod, err) - } - } - if downtime, ok := annotations[annotationDowntime]; ok { - err = result.DownTime.Set(downtime) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationDowntime, fmt.Sprintf("failed to parse %q annotation: %s", annotationDowntime, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationDowntime, err) - } - } - if upscalePeriod, ok := annotations[annotationUpscalePeriod]; ok { - err = result.UpscalePeriod.Set(upscalePeriod) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationUpscalePeriod, fmt.Sprintf("failed to parse %q annotation: %s", annotationUpscalePeriod, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationUpscalePeriod, err) - } - } - if uptime, ok := annotations[annotationUptime]; ok { - err = result.UpTime.Set(uptime) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationUptime, fmt.Sprintf("failed to parse %q annotation: %s", annotationUptime, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationUptime, err) - } - } - if exclude, ok := annotations[annotationExclude]; ok { - err = result.Exclude.Set(exclude) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationExclude, fmt.Sprintf("failed to parse %q annotation: %s", annotationExclude, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationExclude, err) - } - } - if excludeUntil, ok := annotations[annotationExcludeUntil]; ok { - result.ExcludeUntil, err = 
time.Parse(time.RFC3339, excludeUntil) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationExcludeUntil, fmt.Sprintf("failed to parse %q annotation: %s", annotationExcludeUntil, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationExcludeUntil, err) - } - } - if forceUptime, ok := annotations[annotationForceUptime]; ok { - err = result.ForceUptime.Set(forceUptime) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationForceUptime, fmt.Sprintf("failed to parse %q annotation: %s", annotationForceUptime, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationForceUptime, err) - } - } - if forceDowntime, ok := annotations[annotationForceDowntime]; ok { - err = result.ForceDowntime.Set(forceDowntime) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationForceDowntime, fmt.Sprintf("failed to parse %q annotation: %s", annotationForceDowntime, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationForceDowntime, err) - } - } - if downscaleReplicasString, ok := annotations[annotationDownscaleReplicas]; ok { - downscaleReplicas, err := strconv.ParseInt(downscaleReplicasString, 10, 32) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationDownscaleReplicas, fmt.Sprintf("failed to parse %q annotation: %s", annotationDownscaleReplicas, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationDownscaleReplicas, err) - } - // #nosec G115 - result.DownscaleReplicas = int32(downscaleReplicas) - } - if gracePeriod, ok := annotations[annotationGracePeriod]; ok { - err = (*DurationValue)(&result.GracePeriod).Set(gracePeriod) - if err != nil { - logEvent.ErrorInvalidAnnotation(annotationGracePeriod, fmt.Sprintf("failed to parse %q annotation: %s", annotationGracePeriod, err.Error()), ctx) - return result, fmt.Errorf("failed to parse %q annotation: %w", annotationGracePeriod, err) - } - } - - if err = result.CheckForIncompatibleFields(); err != nil { - logEvent.ErrorIncompatibleFields(fmt.Sprintf("found incompatible fields: %s", err.Error()), ctx) - return result, fmt.Errorf("error: found incompatible fields: %w", err) - } - - return result, nil -} - -// GetEnvValue gets the env value and puts it in flag.Value -func GetEnvValue(key string, value flag.Value) error { - if val, ok := os.LookupEnv(key); ok { - err := value.Set(val) - if err != nil { - return fmt.Errorf("failed to set value: %w", err) - } - } - return nil -} - -// GetLayerFromEnv makes a layer and fills it with all values from environment variables -func GetLayerFromEnv() (Layer, error) { - result := NewLayer() - err := GetEnvValue(envUpscalePeriod, &result.UpscalePeriod) - if err != nil { - return result, fmt.Errorf("error while getting %q environment variable: %w", envUpscalePeriod, err) - } - err = GetEnvValue(envUptime, &result.UpTime) - if err != nil { - return result, fmt.Errorf("error while getting %q environment variable: %w", envUptime, err) - } - err = GetEnvValue(envDownscalePeriod, &result.DownscalePeriod) - if err != nil { - return result, fmt.Errorf("error while getting %q environment variable: %w", envDownscalePeriod, err) - } - err = GetEnvValue(envDowntime, &result.DownTime) - if err != nil { - return result, fmt.Errorf("error while getting %q environment variable: %w", envDowntime, err) - } - - if err = result.CheckForIncompatibleFields(); err != nil { - return result, fmt.Errorf("error: found incompatible fields: %w", err) - } - - return result, 
nil
-}

From d312573f15649cf7029e03464823a9e85b7593d3 Mon Sep 17 00:00:00 2001
From: Jonathan Mayer
Date: Wed, 22 Jan 2025 06:46:02 +0100
Subject: [PATCH 20/58] Feat/exclude externally scaled workloads (#78)

* feat: exclude workloads scaled by scaled objects
* refactor: add comments and avoid type shadowing
* fix: inverse logic
* feat: add tests
* refactor: externally scaled filter to better match existing structure
* Squashed commit of the following:

commit 3e40c69dacd20ccc59c6d49bbfe749b3c42d92a8
Author: Jonathan Mayer
Date: Tue Jan 21 06:45:41 2025 +0100

    Refactor/enforce stricter go linters (#93)

    * Merge branch 'perf/make-slices-more-efficient'
    * refactor: everything to stop new linters from crying
    * chore: resolve pr threads

commit 4fcf2d25793a4197fcd60737411b231d221188dc
Author: dependabot[bot]
Date: Tue Jan 21 06:44:45 2025 +0100

    chore(deps): bump actions/setup-go from 5.2.0 to 5.3.0 (#99)

commit 6845a45b2bf0d95babdbaf64923a678955a46578
Author: dependabot[bot]
Date: Tue Jan 21 06:44:06 2025 +0100

    chore(deps): bump golang from 1.23.4 to 1.23.5 (#98)

commit 4000ab1d8ba62bf79f1b88aa5c18cfc300360297
Author: dependabot[bot]
Date: Thu Jan 16 06:50:43 2025 +0100

    chore(deps): bump k8s.io/client-go from 0.32.0 to 0.32.1 (#96)

commit ae781977f09d9445c9d02caa7e1942ffc542541e
Author: Jonathan Mayer
Date: Mon Jan 13 09:35:27 2025 +0100

    perf: reduce memory allocations (#81)

commit ad608b6896074b7ed44b562c2bd4c6eb0026fd7e
Author: dependabot[bot]
Date: Mon Jan 13 08:21:37 2025 +0100

    chore(deps): bump github.com/zalando-incubator/stackset-controller from 1.4.84 to 1.4.92 (#94)

commit 48d222f4ecc108be2ba8d211f69020cf3da15c63
Author: dependabot[bot]
Date: Fri Jan 10 07:47:01 2025 +0100

    chore(deps): bump github.com/prometheus-operator/prometheus-operator/pkg/client from 0.76.2 to 0.79.2 (#87)

commit e856e24159534ce8e4a41479f385523dd5e23081
Author: dependabot[bot]
Date: Fri Jan 10 07:38:58 2025 +0100

    chore(deps): bump github.com/kedacore/keda/v2 from 2.15.1 to 2.16.1 (#90)

commit 5cf9b47e93ccca874aabbbf5bec25e3fdebef280
Author: dependabot[bot]
Date: Fri Jan 10 07:19:20 2025 +0100

    chore(deps): bump k8s.io/api from 0.31.0 to 0.32.0 (#89)

commit 7c5dc0864e10ce8ea283948058e97b9cd47c7818
Author: dependabot[bot]
Date: Fri Jan 10 07:03:48 2025 +0100

    chore(deps): bump golang from 1.23.1 to 1.23.4 (#91)

commit efe5db1f0f9a8d5bcc5701526966f0dc58bb89ab
Author: dependabot[bot]
Date: Fri Jan 10 06:51:57 2025 +0100

    chore(deps): bump actions/setup-go from 5.0.2 to 5.2.0 (#85)

commit 952f8b296e4c332f3bb619e61d9c9b5bdf6ca5df
Author: dependabot[bot]
Date: Fri Jan 10 06:51:28 2025 +0100

    chore(deps): bump docker/build-push-action from 5 to 6 (#84)

commit 0f9015c01fe06079fe8d78fac9f5d11e88ccdf21
Author: Jonathan Mayer
Date: Thu Jan 9 14:39:09 2025 +0100

    chore: add dependabot config (#83)

commit f0b9f9e38f6697d878ef721a57c00e96e1c7ad05
Author: dependabot[bot]
Date: Thu Jan 9 14:33:04 2025 +0100

    chore(deps): bump golang.org/x/net from 0.28.0 to 0.33.0 (#82)
commit 3fcaee6be68c73dd03762fc7767ec9ce710b2907
Author: Jan <157487559+JTaeuber@users.noreply.github.com>
Date: Fri Jan 3 11:47:18 2025 +0100

    fix: make workflows run for forks (#77)

* refactor: revert renaming of vars
* refactor: make slice clipping more readable
---
 internal/api/kubernetes/client.go  |  4 +-
 internal/pkg/scalable/util.go      | 95 ++++++++++++++++++++++++++++--
 internal/pkg/scalable/util_test.go | 49 +++++++++++++++
 internal/pkg/scalable/workload.go  |  4 +-
 4 files changed, 144 insertions(+), 8 deletions(-)

diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go
index b19b5d1..b3a94ee 100644
--- a/internal/api/kubernetes/client.go
+++ b/internal/api/kubernetes/client.go
@@ -220,11 +220,11 @@ func (c client) addWorkloadEvent(eventType, reason, identifier, message string,
 			Namespace: workload.GetNamespace(),
 		},
 		InvolvedObject: corev1.ObjectReference{
-			Kind:       workload.GetObjectKind().GroupVersionKind().Kind,
+			Kind:       workload.GroupVersionKind().Kind,
 			Namespace:  workload.GetNamespace(),
 			Name:       workload.GetName(),
 			UID:        workload.GetUID(),
-			APIVersion: workload.GetObjectKind().GroupVersionKind().GroupVersion().String(),
+			APIVersion: workload.GroupVersionKind().GroupVersion().String(),
 		},
 		Reason:  reason,
 		Message: message,
diff --git a/internal/pkg/scalable/util.go b/internal/pkg/scalable/util.go
index 593b83f..d4b2aad 100644
--- a/internal/pkg/scalable/util.go
+++ b/internal/pkg/scalable/util.go
@@ -3,17 +3,20 @@ package scalable
 import (
 	"fmt"
 	"log/slog"
+	"slices"
 	"strconv"
+	"strings"

 	"github.com/caas-team/gokubedownscaler/internal/pkg/util"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 )

-const (
-	annotationOriginalReplicas = "downscaler/original-replicas"
-)
+const annotationOriginalReplicas = "downscaler/original-replicas"

 // FilterExcluded filters the workloads to match the includeLabels, excludedNamespaces and excludedWorkloads.
 func FilterExcluded(workloads []Workload, includeLabels, excludedNamespaces, excludedWorkloads util.RegexList) []Workload {
+	externallyScaled := getExternallyScaled(workloads)
+
 	results := make([]Workload, 0, len(workloads))

 	for _, workload := range workloads {
@@ -47,10 +50,94 @@ func FilterExcluded(workloads []Workload, includeLabels, excludedNamespaces, exc
 			continue
 		}

+		if isExternallyScaled(workload, externallyScaled) {
+			slog.Debug(
+				"the workload is scaled externally, excluding it from being scanned",
+				"workload", workload.GetName(),
+				"namespace", workload.GetNamespace(),
+			)
+
+			continue
+		}
+
 		results = append(results, workload)
 	}

-	return results[:len(results):len(results)] // unallocate excess capacity
+	return slices.Clip(results)
+}
+
+type workloadIdentifier struct {
+	gvk       schema.GroupVersionKind
+	name      string
+	namespace string
+}
+
+// getExternallyScaled returns identifiers for workloads which are being scaled externally and should therefore be excluded.
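+// For example, a KEDA ScaledObject whose spec.scaleTargetRef names "Deployment2"
+// marks that Deployment as externally scaled (see the util_test.go case below).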
+func getExternallyScaled(workloads []Workload) []workloadIdentifier { + externallyScaled := make([]workloadIdentifier, 0, len(workloads)) + + for _, workload := range workloads { + scaledobject := getWorkloadAsScaledObject(workload) + if scaledobject == nil { + continue + } + + externallyScaled = append(externallyScaled, workloadIdentifier{ + gvk: schema.GroupVersionKind{ + Kind: scaledobject.Spec.ScaleTargetRef.Kind, + Group: strings.Split(scaledobject.Spec.ScaleTargetRef.APIVersion, "/")[0], + Version: strings.Split(scaledobject.Spec.ScaleTargetRef.APIVersion, "/")[1], + }, + name: scaledobject.Spec.ScaleTargetRef.Name, + namespace: scaledobject.Namespace, + }) + } + + return slices.Clip(externallyScaled) +} + +// isExternallyScaled checks if the workload matches any of the given workload identifiers. +func isExternallyScaled(workload Workload, externallyScaled []workloadIdentifier) bool { + for _, wid := range externallyScaled { + if wid.name != workload.GetName() { + continue + } + + if wid.namespace != workload.GetNamespace() { + continue + } + + if !(wid.gvk.Group == "" || wid.gvk.Group == workload.GroupVersionKind().Group) { + continue + } + + if !(wid.gvk.Version == "" || wid.gvk.Version == workload.GroupVersionKind().Version) { + continue + } + + if !(wid.gvk.Kind == "" || wid.gvk.Kind == workload.GroupVersionKind().Kind) { + continue + } + + return true + } + + return false +} + +// getWorkloadAsScaledObject tries to get the given workload as a scaled object. +func getWorkloadAsScaledObject(workload Workload) *scaledObject { + replicaScaled, isReplicaScaled := workload.(*replicaScaledWorkload) + if !isReplicaScaled { + return nil + } + + scaledObject, isScaledObject := replicaScaled.replicaScaledResource.(*scaledObject) + if !isScaledObject { + return nil + } + + return scaledObject } // isMatchingLabels check if the workload is matching any of the specified labels. 
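One caveat with getExternallyScaled above: it assumes scaleTargetRef.apiVersion always contains a "/". A core-group value such as "v1", or an unset field, would make the strings.Split(...)[1] index panic. A defensive variant (a sketch only, not part of this patch) could reuse apimachinery's parser, which accepts both forms:

	import "k8s.io/apimachinery/pkg/runtime/schema"

	// parseTargetGroupVersion splits an apiVersion such as "apps/v1" or "v1"
	// into group and version without risking an index-out-of-range panic.
	func parseTargetGroupVersion(apiVersion string) (group, version string) {
		gv, err := schema.ParseGroupVersion(apiVersion)
		if err != nil {
			return "", apiVersion // fall back to treating the whole string as the version
		}

		return gv.Group, gv.Version
	}

Since isExternallyScaled already treats an empty group, version, or kind as a wildcard, the fallback would still match the intended target.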
diff --git a/internal/pkg/scalable/util_test.go b/internal/pkg/scalable/util_test.go index 0910a04..e6b2c39 100644 --- a/internal/pkg/scalable/util_test.go +++ b/internal/pkg/scalable/util_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/caas-team/gokubedownscaler/internal/pkg/util" + "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,6 +19,7 @@ func TestFilterExcluded(t *testing.T) { deployment1 Workload deployment2 Workload labeledDeployment Workload + scaledObject Workload } ns1 := ns{ deployment1: &replicaScaledWorkload{&deployment{Deployment: &appsv1.Deployment{ @@ -50,6 +52,45 @@ func TestFilterExcluded(t *testing.T) { }, }}}, } + ns3 := ns{ + deployment1: &replicaScaledWorkload{&deployment{Deployment: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Deployment1", + Namespace: "Namespace3", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + }}}, + deployment2: &replicaScaledWorkload{&deployment{Deployment: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Deployment2", + Namespace: "Namespace3", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + }}}, + scaledObject: &replicaScaledWorkload{&scaledObject{ScaledObject: &v1alpha1.ScaledObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ScaledObject1", + Namespace: "Namespace3", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "keda.sh/v1alpha1", + Kind: "ScaledObject", + }, + Spec: v1alpha1.ScaledObjectSpec{ + ScaleTargetRef: &v1alpha1.ScaleTarget{ + Name: "Deployment2", + APIVersion: "apps/v1", + Kind: "", + }, + }, + }}}, + } tests := []struct { name string workloads []Workload @@ -90,6 +131,14 @@ func TestFilterExcluded(t *testing.T) { excludedWorkloads: util.RegexList{regexp.MustCompile("Deployment1")}, // exclude Deployment1 want: []Workload{ns1.deployment2}, }, + { + name: "exclude scaled object scaled", + workloads: []Workload{ns3.deployment1, ns3.deployment2, ns3.scaledObject, ns1.deployment1, ns1.deployment2, ns2.deployment1}, + includeLabels: nil, + excludedNamespaces: nil, + excludedWorkloads: nil, + want: []Workload{ns3.deployment1, ns3.scaledObject, ns1.deployment1, ns1.deployment2, ns2.deployment1}, + }, } for _, test := range tests { diff --git a/internal/pkg/scalable/workload.go b/internal/pkg/scalable/workload.go index d02a9e7..0ce5204 100644 --- a/internal/pkg/scalable/workload.go +++ b/internal/pkg/scalable/workload.go @@ -62,14 +62,14 @@ type scalableResource interface { GetName() string // GetUID gets the uid of the workload GetUID() types.UID - // GetObjectKind gets the ObjectKind of the workload - GetObjectKind() schema.ObjectKind // GetLabels gets the labels of the workload GetLabels() map[string]string // GetCreationTimestamp gets the creation timestamp of the workload GetCreationTimestamp() metav1.Time // SetAnnotations sets the annotations on the resource. Changes won't be made on Kubernetes until update() is called SetAnnotations(annotations map[string]string) + // GroupVersionKind gets the group version kind of the workload + GroupVersionKind() schema.GroupVersionKind } // Workload provides all functions needed to scale the workload. 
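For reference, swapping GetObjectKind() for GroupVersionKind() in the scalableResource interface works because the typed objects wrapped by each workload embed metav1.TypeMeta, which already provides a GroupVersionKind() method. A standalone illustration (a sketch, not code from this patch):

	package main

	import (
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	func main() {
		tm := metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"}

		// GroupVersionKind derives the schema.GroupVersionKind directly from
		// the APIVersion ("group/version", or just "version" for the core
		// group) and Kind fields.
		gvk := tm.GroupVersionKind()
		fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // prints: apps v1 Deployment
	}

This is also why the event code in client.go above can call workload.GroupVersionKind() directly instead of going through GetObjectKind().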
From 5b7043825e10c18efecb5296980b8b10690cfce5 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:12:43 +0100 Subject: [PATCH 21/58] refactor: rebased leader-election onto main --- cmd/kubedownscaler/main.go | 51 +++++++++++++++ internal/api/kubernetes/client.go | 103 +++++++++++++++++++++++++++++- internal/api/kubernetes/util.go | 3 +- 3 files changed, 152 insertions(+), 5 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 10f83b7..d19f564 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -6,9 +6,13 @@ import ( "fmt" "log/slog" "os" + "os/signal" "regexp" "sync" + "sync/atomic" + "syscall" "time" + _ "time/tzdata" "github.com/caas-team/gokubedownscaler/internal/api/kubernetes" @@ -60,6 +64,7 @@ func main() { slog.Error("failed to get layer from env", "error", err) os.Exit(1) } +} if config.Debug || config.DryRun { slog.SetLogLoggerLevel(slog.LevelDebug) @@ -69,6 +74,11 @@ func main() { slog.Error("found incompatible fields", "error", err) os.Exit(1) } + downscalerNamespace, err := kubernetes.GetCurrentNamespaceFromFile() + if err != nil { + slog.Error("failed to get downscaler namespace", "error", err) + os.Exit(1) + } ctx := context.Background() @@ -80,6 +90,42 @@ func main() { os.Exit(1) } + // leader election and graceful termination + go func() { + // create a context to handle termination gracefully + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // listen for termination signals in a separate goroutine + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) + + // Goroutine for leader election and lease renewal + go func() { + err := client.CreateOrUpdateLease(ctx, downscalerNamespace, &isLeader) + if err != nil { + slog.Error("failed to acquire lease", "error", err) + os.Exit(1) + } + }() + + // pause and wait for termination signal + <-sigs + slog.Debug("received termination signal, deleting lease") + + // delete the lease after termination signal is intercepted + err := client.DeleteLease(ctx, downscalerNamespace, &isLeader) + if err != nil { + slog.Error("failed to delete lease", "error", err) + } else { + slog.Debug("lease deleted successfully") + } + + // cancel the context to stop the lease renewal goroutine and exit the main process + cancel() + os.Exit(1) + }() + slog.Info("started downscaler") err = scanWorkloads(client, ctx, &layerCli, &layerEnv, config) @@ -102,6 +148,11 @@ func scanWorkloads( config *util.RuntimeConfiguration, ) error { for { + if !isLeader.Load() { + slog.Debug("not the leader, skipping workload scanning") + time.Sleep(5 * time.Second) // Sync sleep with lease duration + continue + } slog.Info("scanning workloads") workloads, err := client.GetWorkloads(config.IncludeNamespaces, config.IncludeResources, ctx) diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index b3a94ee..4faa732 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -3,16 +3,20 @@ package kubernetes import ( "context" "crypto/sha256" + "errors" "fmt" "log/slog" + "os" "strings" "time" + "sync/atomic" argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" "github.com/caas-team/gokubedownscaler/internal/pkg/scalable" keda "github.com/kedacore/keda/v2/pkg/generated/clientset/versioned" monitoring "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" zalando 
"github.com/zalando-incubator/stackset-controller/pkg/clientset" + coordv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -21,9 +25,15 @@ import ( const ( componentName = "kubedownscaler" timeout = 30 * time.Second + componentName = "kubedownscaler" + leaseName = "downscaler-lease" + leaseDuration = 30 * time.Second + leaseCheckSleepDuration = leaseDuration / 2 ) -// Client is an interface representing a high-level client to get and modify Kubernetes resources. +var errResourceNotSupported = errors.New("error: specified rescource type is not supported") + +// Client is an interface representing a high-level client to get and modify Kubernetes resources type Client interface { // GetNamespaceAnnotations gets the annotations of the workload's namespace GetNamespaceAnnotations(namespace string, ctx context.Context) (map[string]string, error) @@ -35,6 +45,10 @@ type Client interface { UpscaleWorkload(workload scalable.Workload, ctx context.Context) error // addWorkloadEvent creates a new event on the workload addWorkloadEvent(eventType string, reason string, id string, message string, workload scalable.Workload, ctx context.Context) error + // CreateOrUpdateLease creates or update the downscaler lease + CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error + // DeleteLease deletes the downscaler lease + DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error } // NewClient makes a new Client. @@ -83,13 +97,13 @@ func NewClient(kubeconfig string, dryRun bool) (client, error) { return kubeclient, nil } -// client is a Kubernetes client with downscaling specific functions. +// client is a Kubernetes client with downscaling specific functions type client struct { clientsets *scalable.Clientsets dryRun bool } -// GetNamespaceAnnotations gets the annotations of the workload's namespace. +// GetNamespaceAnnotations gets the annotations of the workload's namespace func (c client) GetNamespaceAnnotations(namespace string, ctx context.Context) (map[string]string, error) { ns, err := c.clientsets.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) if err != nil { @@ -240,3 +254,86 @@ func (c client) addWorkloadEvent(eventType, reason, identifier, message string, return nil } + +// CreateOrUpdateLease attempts to acquire and maintain a lease for leadership. 
+func (c client) CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { + // get hostname for holder identity + holderIdentity, err := os.Hostname() + if err != nil { + slog.Error("failed to get hostname", "error", err) + return err + } + + leasesClient := c.clientsets.Kubernetes.CoordinationV1().Leases(leaseNamespace) + leaseDurationSeconds := int32(leaseDuration.Seconds()) + + for { + // lease Object + lease := &coordv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: leaseName, + Namespace: leaseNamespace, + }, + Spec: coordv1.LeaseSpec{ + HolderIdentity: &holderIdentity, + LeaseDurationSeconds: &leaseDurationSeconds, + RenewTime: &metav1.MicroTime{Time: time.Now()}, + }, + } + + // search for an existing lease inside the namespace + existingLease, err := leasesClient.Get(ctx, leaseName, metav1.GetOptions{}) + if err != nil { + // creates new lease if lease doesn't exist, and jump to the next iteration + slog.Debug("creating new lease", "lease", leaseName, "namespace", leaseNamespace) + _, err = leasesClient.Create(ctx, lease, metav1.CreateOptions{}) + if err != nil { + slog.Error("failed to create lease", "error", err) + time.Sleep(leaseCheckSleepDuration) + continue + } + slog.Debug("acquired lease", "holder", holderIdentity, "namespace", leaseNamespace) + isLeader.Store(true) + } else { + // check if the existing lease has expired or is held by another pod; if it is held by another pod jump to the next iteration + if existingLease.Spec.RenewTime != nil && + time.Since(existingLease.Spec.RenewTime.Time) < leaseDuration { + if *existingLease.Spec.HolderIdentity != holderIdentity { + slog.Debug("lease already held by another", "holder", *existingLease.Spec.HolderIdentity) + isLeader.Store(false) + time.Sleep(leaseCheckSleepDuration) + continue + } + } + + // update the lease if it is currently held by the current pod + existingLease.Spec.HolderIdentity = &holderIdentity + existingLease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()} + _, err = leasesClient.Update(ctx, existingLease, metav1.UpdateOptions{}) + if err != nil { + slog.Error("failed to update lease", "error", err) + time.Sleep(leaseCheckSleepDuration) + continue + } + slog.Debug("lease renewed", "holder", holderIdentity, "namespace", leaseNamespace) + isLeader.Store(true) + } + + // sleep before renewing + time.Sleep(leaseCheckSleepDuration) + } +} + +func (c client) DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { + leasesClient := c.clientsets.Kubernetes.CoordinationV1().Leases(leaseNamespace) + + err := leasesClient.Delete(ctx, leaseName, metav1.DeleteOptions{}) + if err != nil { + slog.Error("failed to delete lease %s in namespace %s", leaseName, leaseNamespace) + return err + } + + isLeader.Store(false) + slog.Debug("deleted lease %s in namespace %s", leaseName, leaseNamespace) + return nil +} diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go index 91b1827..698ae68 100644 --- a/internal/api/kubernetes/util.go +++ b/internal/api/kubernetes/util.go @@ -2,10 +2,9 @@ package kubernetes import ( "fmt" - "os" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "os" ) // getConfig gets a rest.Config for the specified kubeconfig or if empty from the in-cluster config. 
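
Two remarks on the lease loop added above before the series moves on. First, the Get/Update sequence is not atomic, so two replicas can race between reading and renewing the lease, and expiry is judged by comparing RenewTime against the local wall clock. Second, the slog calls in DeleteLease use printf-style "%s" verbs, but slog takes alternating key/value arguments, so the verbs print literally and leaseName/leaseNamespace get paired off as a single stray attribute. Patch 23 below replaces all of this with client-go's leaderelection package; its minimal shape is roughly the following (a sketch reusing the patch's lease name, not the series' final code):

// Sketch of the client-go leader-election API the series migrates to.
// The lease name mirrors the patch; the namespace below is a placeholder.
package main

import (
	"context"
	"log/slog"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func runWithLease(ctx context.Context, clientset kubernetes.Interface, namespace string, lead func(context.Context)) {
	hostname, err := os.Hostname() // holder identity, as in the patch
	if err != nil {
		slog.Error("failed to get hostname", "error", err)
		os.Exit(1)
	}

	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "downscaler-lease", Namespace: namespace},
		Client:     clientset.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: hostname},
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true, // give the lease back on context cancellation
		LeaseDuration:   30 * time.Second,
		RenewDeadline:   20 * time.Second,
		RetryPeriod:     5 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: lead,
			OnStoppedLeading: func() { slog.Info("stopped leading") },
			OnNewLeader:      func(id string) { slog.Info("new leader", "identity", id) },
		},
	})
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		slog.Error("not running inside a cluster", "error", err)
		os.Exit(1)
	}

	// "kube-downscaler" is an example namespace, not taken from the patch.
	runWithLease(context.Background(), kubernetes.NewForConfigOrDie(cfg), "kube-downscaler", func(ctx context.Context) {
		slog.Info("started leading; the scan loop would run here")
		<-ctx.Done()
	})
}

client-go validates these durations itself: LeaseDuration must exceed RenewDeadline, which in turn must exceed the jittered RetryPeriod, so a leader gets several renewal attempts before it can lose the lease.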
From d6c44b586a8e05c456a1917ace82f2147a34a12c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Dec 2024 12:16:15 +0000 Subject: [PATCH 22/58] chore: automatically push pre-commit changes --- internal/api/kubernetes/util.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go index 698ae68..91b1827 100644 --- a/internal/api/kubernetes/util.go +++ b/internal/api/kubernetes/util.go @@ -2,9 +2,10 @@ package kubernetes import ( "fmt" + "os" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "os" ) // getConfig gets a rest.Config for the specified kubeconfig or if empty from the in-cluster config. From 69854b627f36011cb890e72a7e14a3f3d2b43548 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Sat, 25 Jan 2025 17:27:36 +0100 Subject: [PATCH 23/58] refactor: leader election with native library --- cmd/kubedownscaler/main.go | 86 ++++++++++++++++--------------- internal/api/kubernetes/client.go | 41 ++++++++++----- 2 files changed, 73 insertions(+), 54 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index d19f564..9cee0ee 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -4,12 +4,12 @@ import ( "context" "flag" "fmt" + "k8s.io/client-go/tools/leaderelection" "log/slog" "os" "os/signal" "regexp" "sync" - "sync/atomic" "syscall" "time" @@ -26,6 +26,8 @@ const ( defaultGracePeriod = 15 * time.Minute defaultDownscaleReplicas = 0 + leaseName = "downscaler-lease" + // runtime config defaults. defaultInterval = 30 * time.Second ) @@ -64,7 +66,6 @@ func main() { slog.Error("failed to get layer from env", "error", err) os.Exit(1) } -} if config.Debug || config.DryRun { slog.SetLogLoggerLevel(slog.LevelDebug) @@ -80,8 +81,6 @@ func main() { os.Exit(1) } - ctx := context.Background() - slog.Debug("getting client for kubernetes") client, err := kubernetes.NewClient(config.Kubeconfig, config.DryRun) @@ -90,45 +89,53 @@ func main() { os.Exit(1) } - // leader election and graceful termination - go func() { - // create a context to handle termination gracefully - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // listen for termination signals in a separate goroutine - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) - - // Goroutine for leader election and lease renewal - go func() { - err := client.CreateOrUpdateLease(ctx, downscalerNamespace, &isLeader) - if err != nil { - slog.Error("failed to acquire lease", "error", err) - os.Exit(1) - } - }() - - // pause and wait for termination signal - <-sigs - slog.Debug("received termination signal, deleting lease") - - // delete the lease after termination signal is intercepted - err := client.DeleteLease(ctx, downscalerNamespace, &isLeader) - if err != nil { - slog.Error("failed to delete lease", "error", err) - } else { - slog.Debug("lease deleted successfully") - } + run := func(ctx context.Context) { + loop(client, ctx, layerCli, layerEnv, config) + } - // cancel the context to stop the lease renewal goroutine and exit the main process - cancel() + ctx, cancel := context.WithCancel(context.Background()) + + lease, err := client.CreateLease(leaseName, downscalerNamespace, ctx) + if err != nil { + slog.Error("failed to create lease", "error", err) os.Exit(1) + } + + defer cancel() + + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt, syscall.SIGTERM) + go func() { + <-ch + cancel() }() + 
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lease, + ReleaseOnCancel: true, + LeaseDuration: 60 * time.Second, + RenewDeadline: 15 * time.Second, + RetryPeriod: 5 * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + slog.Info("started leading") + run(ctx) + }, + OnStoppedLeading: func() { + slog.Info("stopped leading") + }, + OnNewLeader: func(identity string) { + slog.Info("new leader elected: ", "identity", identity) + }, + }, + }) + +} + +func loop(client kubernetes.Client, ctx context.Context, layerCli values.Layer, layerEnv values.Layer, config *util.RuntimeConfiguration) { slog.Info("started downscaler") - err = scanWorkloads(client, ctx, &layerCli, &layerEnv, config) + err := scanWorkloads(client, ctx, &layerCli, &layerEnv, config) if err != nil { slog.Error("failed to scan over workloads", "error", err, @@ -148,11 +155,6 @@ func scanWorkloads( config *util.RuntimeConfiguration, ) error { for { - if !isLeader.Load() { - slog.Debug("not the leader, skipping workload scanning") - time.Sleep(5 * time.Second) // Sync sleep with lease duration - continue - } slog.Info("scanning workloads") workloads, err := client.GetWorkloads(config.IncludeNamespaces, config.IncludeResources, ctx) diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index 4faa732..7083aec 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -3,20 +3,19 @@ package kubernetes import ( "context" "crypto/sha256" - "errors" "fmt" "log/slog" "os" "strings" "time" - "sync/atomic" + + "k8s.io/client-go/tools/leaderelection/resourcelock" argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" "github.com/caas-team/gokubedownscaler/internal/pkg/scalable" keda "github.com/kedacore/keda/v2/pkg/generated/clientset/versioned" monitoring "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" zalando "github.com/zalando-incubator/stackset-controller/pkg/clientset" - coordv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -25,14 +24,8 @@ import ( const ( componentName = "kubedownscaler" timeout = 30 * time.Second - componentName = "kubedownscaler" - leaseName = "downscaler-lease" - leaseDuration = 30 * time.Second - leaseCheckSleepDuration = leaseDuration / 2 ) -var errResourceNotSupported = errors.New("error: specified rescource type is not supported") - // Client is an interface representing a high-level client to get and modify Kubernetes resources type Client interface { // GetNamespaceAnnotations gets the annotations of the workload's namespace @@ -43,12 +36,14 @@ type Client interface { DownscaleWorkload(replicas int32, workload scalable.Workload, ctx context.Context) error // UpscaleWorkload upscales the workload to the original replicas UpscaleWorkload(workload scalable.Workload, ctx context.Context) error + // CreateLease creates a new lease for the downscaler + CreateLease(leaseName string, leaseNamespace string, ctx context.Context) (*resourcelock.LeaseLock, error) // addWorkloadEvent creates a new event on the workload addWorkloadEvent(eventType string, reason string, id string, message string, workload scalable.Workload, ctx context.Context) error // CreateOrUpdateLease creates or update the downscaler lease - CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error + // CreateOrUpdateLease(ctx context.Context, 
leaseNamespace string, isLeader *atomic.Bool) error // DeleteLease deletes the downscaler lease - DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error + // DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error } // NewClient makes a new Client. @@ -255,6 +250,28 @@ func (c client) addWorkloadEvent(eventType, reason, identifier, message string, return nil } +func (c client) CreateLease(leaseName string, leaseNamespace string, ctx context.Context) (*resourcelock.LeaseLock, error) { + hostname, err := os.Hostname() + if err != nil { + slog.Error("failed to get hostname", "error", err) + return nil, err + } + + lease := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: leaseName, + Namespace: leaseNamespace, + }, + Client: c.clientsets.Kubernetes.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: hostname, + }, + } + + return lease, err +} + +/* // CreateOrUpdateLease attempts to acquire and maintain a lease for leadership. func (c client) CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { // get hostname for holder identity @@ -336,4 +353,4 @@ func (c client) DeleteLease(ctx context.Context, leaseNamespace string, isLeader isLeader.Store(false) slog.Debug("deleted lease %s in namespace %s", leaseName, leaseNamespace) return nil -} +}*/ From a662bd6623f93fd5c5c744be169717e253f2ccea Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 25 Jan 2025 16:28:46 +0000 Subject: [PATCH 24/58] chore: automatically push pre-commit changes --- cmd/kubedownscaler/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 9cee0ee..1f4abaf 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -4,7 +4,6 @@ import ( "context" "flag" "fmt" - "k8s.io/client-go/tools/leaderelection" "log/slog" "os" "os/signal" @@ -13,6 +12,8 @@ import ( "syscall" "time" + "k8s.io/client-go/tools/leaderelection" + _ "time/tzdata" "github.com/caas-team/gokubedownscaler/internal/api/kubernetes" @@ -129,7 +130,6 @@ func main() { }, }, }) - } func loop(client kubernetes.Client, ctx context.Context, layerCli values.Layer, layerEnv values.Layer, config *util.RuntimeConfiguration) { From f3590466af2b8be0620cad42d94b2b9455dec598 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Sat, 25 Jan 2025 17:38:07 +0100 Subject: [PATCH 25/58] refactor: linter suggestions --- cmd/kubedownscaler/main.go | 16 ++++++++-------- internal/api/kubernetes/client.go | 11 +++++------ internal/api/kubernetes/util.go | 4 +++- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 1f4abaf..ceefe6a 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -11,15 +11,13 @@ import ( "sync" "syscall" "time" - - "k8s.io/client-go/tools/leaderelection" - _ "time/tzdata" "github.com/caas-team/gokubedownscaler/internal/api/kubernetes" "github.com/caas-team/gokubedownscaler/internal/pkg/scalable" "github.com/caas-team/gokubedownscaler/internal/pkg/util" "github.com/caas-team/gokubedownscaler/internal/pkg/values" + "k8s.io/client-go/tools/leaderelection" ) const ( @@ -76,6 +74,7 @@ func main() { slog.Error("found incompatible fields", "error", err) os.Exit(1) } + downscalerNamespace, err := kubernetes.GetCurrentNamespaceFromFile() if err != nil { slog.Error("failed to get 
downscaler namespace", "error", err) @@ -91,7 +90,7 @@ func main() { } run := func(ctx context.Context) { - loop(client, ctx, layerCli, layerEnv, config) + loop(client, ctx, &layerCli, &layerEnv, config) } ctx, cancel := context.WithCancel(context.Background()) @@ -106,6 +105,7 @@ func main() { ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt, syscall.SIGTERM) + go func() { <-ch cancel() @@ -132,16 +132,16 @@ func main() { }) } -func loop(client kubernetes.Client, ctx context.Context, layerCli values.Layer, layerEnv values.Layer, config *util.RuntimeConfiguration) { +func loop(client kubernetes.Client, ctx context.Context, layerCli, layerEnv *values.Layer, config *util.RuntimeConfiguration) { slog.Info("started downscaler") - err := scanWorkloads(client, ctx, &layerCli, &layerEnv, config) + err := scanWorkloads(client, ctx, layerCli, layerEnv, config) if err != nil { slog.Error("failed to scan over workloads", "error", err, "config", config, - "CliLayer", layerCli, - "EnvLayer", layerEnv, + "CliLayer", &layerCli, + "EnvLayer", &layerEnv, ) os.Exit(1) } diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index 7083aec..7c5f563 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -9,8 +9,6 @@ import ( "strings" "time" - "k8s.io/client-go/tools/leaderelection/resourcelock" - argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" "github.com/caas-team/gokubedownscaler/internal/pkg/scalable" keda "github.com/kedacore/keda/v2/pkg/generated/clientset/versioned" @@ -19,6 +17,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/leaderelection/resourcelock" ) const ( @@ -26,7 +25,7 @@ const ( timeout = 30 * time.Second ) -// Client is an interface representing a high-level client to get and modify Kubernetes resources +// Client is an interface representing a high-level client to get and modify Kubernetes resources. type Client interface { // GetNamespaceAnnotations gets the annotations of the workload's namespace GetNamespaceAnnotations(namespace string, ctx context.Context) (map[string]string, error) @@ -92,13 +91,13 @@ func NewClient(kubeconfig string, dryRun bool) (client, error) { return kubeclient, nil } -// client is a Kubernetes client with downscaling specific functions +// client is a Kubernetes client with downscaling specific functions. type client struct { clientsets *scalable.Clientsets dryRun bool } -// GetNamespaceAnnotations gets the annotations of the workload's namespace +// GetNamespaceAnnotations gets the annotations of the workload's namespace. 
func (c client) GetNamespaceAnnotations(namespace string, ctx context.Context) (map[string]string, error) { ns, err := c.clientsets.Kubernetes.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) if err != nil { @@ -268,7 +267,7 @@ func (c client) CreateLease(leaseName string, leaseNamespace string, ctx context }, } - return lease, err + return lease, fmt.Errorf("failed to create lease: %w", err) } /* diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go index 91b1827..0c44ad6 100644 --- a/internal/api/kubernetes/util.go +++ b/internal/api/kubernetes/util.go @@ -17,12 +17,14 @@ func getConfig(kubeconfig string) (*rest.Config, error) { return clientcmd.BuildConfigFromFlags("", kubeconfig) //nolint: wrapcheck // error gets wrapped in the calling function, so its fine } -// GetCurrentNamespaceFromFile retrieves downscaler namespace from its service account file +// GetCurrentNamespaceFromFile retrieves downscaler namespace from its service account file. func GetCurrentNamespaceFromFile() (string, error) { namespaceFile := "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + namespace, err := os.ReadFile(namespaceFile) if err != nil { return "", fmt.Errorf("failed to read namespace file: %v", err) } + return string(namespace), nil } From a7efc1e25e5320e1d66a72cd494cb2b77d7a63b9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 25 Jan 2025 16:39:29 +0000 Subject: [PATCH 26/58] chore: automatically push pre-commit changes --- internal/api/kubernetes/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go index 0c44ad6..7be036d 100644 --- a/internal/api/kubernetes/util.go +++ b/internal/api/kubernetes/util.go @@ -23,7 +23,7 @@ func GetCurrentNamespaceFromFile() (string, error) { namespace, err := os.ReadFile(namespaceFile) if err != nil { - return "", fmt.Errorf("failed to read namespace file: %v", err) + return "", fmt.Errorf("failed to read namespace file: %w", err) } return string(namespace), nil From 47eb6ef07cedf16608803b4707d683eec1d0b861 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Sat, 25 Jan 2025 17:45:08 +0100 Subject: [PATCH 27/58] refactor: linter suggestions --- cmd/kubedownscaler/main.go | 2 +- internal/api/kubernetes/client.go | 94 +------------------------------ 2 files changed, 4 insertions(+), 92 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index ceefe6a..f8fb740 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -95,7 +95,7 @@ func main() { ctx, cancel := context.WithCancel(context.Background()) - lease, err := client.CreateLease(leaseName, downscalerNamespace, ctx) + lease, err := client.CreateLease(leaseName, downscalerNamespace) if err != nil { slog.Error("failed to create lease", "error", err) os.Exit(1) diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index 7c5f563..966a498 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -36,13 +36,9 @@ type Client interface { // UpscaleWorkload upscales the workload to the original replicas UpscaleWorkload(workload scalable.Workload, ctx context.Context) error // CreateLease creates a new lease for the downscaler - CreateLease(leaseName string, leaseNamespace string, ctx context.Context) (*resourcelock.LeaseLock, error) + CreateLease(leaseName, leaseNamespace string) (*resourcelock.LeaseLock, error) // 
addWorkloadEvent creates a new event on the workload addWorkloadEvent(eventType string, reason string, id string, message string, workload scalable.Workload, ctx context.Context) error - // CreateOrUpdateLease creates or update the downscaler lease - // CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error - // DeleteLease deletes the downscaler lease - // DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error } // NewClient makes a new Client. @@ -249,11 +245,11 @@ func (c client) addWorkloadEvent(eventType, reason, identifier, message string, return nil } -func (c client) CreateLease(leaseName string, leaseNamespace string, ctx context.Context) (*resourcelock.LeaseLock, error) { +func (c client) CreateLease(leaseName, leaseNamespace string) (*resourcelock.LeaseLock, error) { hostname, err := os.Hostname() if err != nil { slog.Error("failed to get hostname", "error", err) - return nil, err + return nil, fmt.Errorf("failed to get hostname: %w", err) } lease := &resourcelock.LeaseLock{ @@ -269,87 +265,3 @@ func (c client) CreateLease(leaseName string, leaseNamespace string, ctx context return lease, fmt.Errorf("failed to create lease: %w", err) } - -/* -// CreateOrUpdateLease attempts to acquire and maintain a lease for leadership. -func (c client) CreateOrUpdateLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { - // get hostname for holder identity - holderIdentity, err := os.Hostname() - if err != nil { - slog.Error("failed to get hostname", "error", err) - return err - } - - leasesClient := c.clientsets.Kubernetes.CoordinationV1().Leases(leaseNamespace) - leaseDurationSeconds := int32(leaseDuration.Seconds()) - - for { - // lease Object - lease := &coordv1.Lease{ - ObjectMeta: metav1.ObjectMeta{ - Name: leaseName, - Namespace: leaseNamespace, - }, - Spec: coordv1.LeaseSpec{ - HolderIdentity: &holderIdentity, - LeaseDurationSeconds: &leaseDurationSeconds, - RenewTime: &metav1.MicroTime{Time: time.Now()}, - }, - } - - // search for an existing lease inside the namespace - existingLease, err := leasesClient.Get(ctx, leaseName, metav1.GetOptions{}) - if err != nil { - // creates new lease if lease doesn't exist, and jump to the next iteration - slog.Debug("creating new lease", "lease", leaseName, "namespace", leaseNamespace) - _, err = leasesClient.Create(ctx, lease, metav1.CreateOptions{}) - if err != nil { - slog.Error("failed to create lease", "error", err) - time.Sleep(leaseCheckSleepDuration) - continue - } - slog.Debug("acquired lease", "holder", holderIdentity, "namespace", leaseNamespace) - isLeader.Store(true) - } else { - // check if the existing lease has expired or is held by another pod; if it is held by another pod jump to the next iteration - if existingLease.Spec.RenewTime != nil && - time.Since(existingLease.Spec.RenewTime.Time) < leaseDuration { - if *existingLease.Spec.HolderIdentity != holderIdentity { - slog.Debug("lease already held by another", "holder", *existingLease.Spec.HolderIdentity) - isLeader.Store(false) - time.Sleep(leaseCheckSleepDuration) - continue - } - } - - // update the lease if it is currently held by the current pod - existingLease.Spec.HolderIdentity = &holderIdentity - existingLease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()} - _, err = leasesClient.Update(ctx, existingLease, metav1.UpdateOptions{}) - if err != nil { - slog.Error("failed to update lease", "error", err) - time.Sleep(leaseCheckSleepDuration) - continue - } - slog.Debug("lease 
renewed", "holder", holderIdentity, "namespace", leaseNamespace) - isLeader.Store(true) - } - - // sleep before renewing - time.Sleep(leaseCheckSleepDuration) - } -} - -func (c client) DeleteLease(ctx context.Context, leaseNamespace string, isLeader *atomic.Bool) error { - leasesClient := c.clientsets.Kubernetes.CoordinationV1().Leases(leaseNamespace) - - err := leasesClient.Delete(ctx, leaseName, metav1.DeleteOptions{}) - if err != nil { - slog.Error("failed to delete lease %s in namespace %s", leaseName, leaseNamespace) - return err - } - - isLeader.Store(false) - slog.Debug("deleted lease %s in namespace %s", leaseName, leaseNamespace) - return nil -}*/ From 3e72bdeaf4a633eb9de48c35a8e1b4978f8945d3 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Sat, 25 Jan 2025 17:46:56 +0100 Subject: [PATCH 28/58] refactor: leaserole refactoring --- deployments/chart/templates/_helpers.tpl | 13 ------------- deployments/chart/templates/leaserole.yaml | 12 +++++++++++- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/deployments/chart/templates/_helpers.tpl b/deployments/chart/templates/_helpers.tpl index e4c073c..840282f 100644 --- a/deployments/chart/templates/_helpers.tpl +++ b/deployments/chart/templates/_helpers.tpl @@ -270,16 +270,3 @@ Create defined permissions for roles {{/* Create defined permissions for lease role */}} -{{- define "go-kube-downscaler.leases.permissions" -}} -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - create - - watch - - list - - update - - delete -{{- end -}} diff --git a/deployments/chart/templates/leaserole.yaml b/deployments/chart/templates/leaserole.yaml index 322352f..93d4701 100644 --- a/deployments/chart/templates/leaserole.yaml +++ b/deployments/chart/templates/leaserole.yaml @@ -4,7 +4,17 @@ metadata: name: {{ include "go-kube-downscaler.fullname" . }}-lease-role namespace: {{ .Release.Namespace }} rules: -{{ include "go-kube-downscaler.leases.permissions" . 
}} +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - watch + - list + - update + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding From 3e23a8392e42eb1f82d167f1c8cc79e11e3cb6e2 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Sat, 25 Jan 2025 17:54:09 +0100 Subject: [PATCH 29/58] refactor: layerCli and layerEnv to extract their values --- cmd/kubedownscaler/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index f8fb740..d6af302 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -140,8 +140,8 @@ func loop(client kubernetes.Client, ctx context.Context, layerCli, layerEnv *val slog.Error("failed to scan over workloads", "error", err, "config", config, - "CliLayer", &layerCli, - "EnvLayer", &layerEnv, + "CliLayer", *layerCli, + "EnvLayer", *layerEnv, ) os.Exit(1) } From 031a32ffc343e86db189b201d4e39a3dde80e5d1 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Sat, 25 Jan 2025 18:08:04 +0100 Subject: [PATCH 30/58] refactor: wrong error on return, log messages for leader election --- cmd/kubedownscaler/main.go | 2 +- internal/api/kubernetes/client.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index d6af302..12a0715 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -126,7 +126,7 @@ func main() { slog.Info("stopped leading") }, OnNewLeader: func(identity string) { - slog.Info("new leader elected: ", "identity", identity) + slog.Info("new leader elected", "identity", identity) }, }, }) diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go index 966a498..9054880 100644 --- a/internal/api/kubernetes/client.go +++ b/internal/api/kubernetes/client.go @@ -263,5 +263,5 @@ func (c client) CreateLease(leaseName, leaseNamespace string) (*resourcelock.Lea }, } - return lease, fmt.Errorf("failed to create lease: %w", err) + return lease, nil } From aeb4b729f84210e484a7022edb61b50a81e35d0b Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Sun, 26 Jan 2025 16:38:47 +0100 Subject: [PATCH 31/58] refactor: improved leader election mechanism, removed comment from helpers.tpl --- cmd/kubedownscaler/main.go | 37 ++++++++++++------------ deployments/chart/templates/_helpers.tpl | 3 -- internal/api/kubernetes/util.go | 6 ++-- 3 files changed, 21 insertions(+), 25 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 12a0715..13a585c 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -75,12 +75,6 @@ func main() { os.Exit(1) } - downscalerNamespace, err := kubernetes.GetCurrentNamespaceFromFile() - if err != nil { - slog.Error("failed to get downscaler namespace", "error", err) - os.Exit(1) - } - slog.Debug("getting client for kubernetes") client, err := kubernetes.NewClient(config.Kubeconfig, config.DryRun) @@ -89,25 +83,30 @@ func main() { os.Exit(1) } - run := func(ctx context.Context) { - loop(client, ctx, &layerCli, &layerEnv, config) - } - ctx, cancel := context.WithCancel(context.Background()) - lease, err := client.CreateLease(leaseName, downscalerNamespace) + defer cancel() + + downscalerNamespace, err := kubernetes.GetCurrentNamespace() if err != nil { - slog.Error("failed to create lease", 
"error", err) - os.Exit(1) + slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err) + startScanning(client, ctx, &layerCli, &layerEnv, config) + + return } - defer cancel() + lease, err := client.CreateLease(leaseName, downscalerNamespace) + if err != nil { + slog.Warn("failed to create lease", "error", err) + slog.Warn("proceeding without leader election, this may cause multiple instances to conflict when modifying the same resources") + startScanning(client, ctx, &layerCli, &layerEnv, config) + } - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt, syscall.SIGTERM) + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, os.Interrupt, syscall.SIGTERM) go func() { - <-ch + <-sigs cancel() }() @@ -120,7 +119,7 @@ func main() { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { slog.Info("started leading") - run(ctx) + startScanning(client, ctx, &layerCli, &layerEnv, config) }, OnStoppedLeading: func() { slog.Info("stopped leading") @@ -132,7 +131,7 @@ func main() { }) } -func loop(client kubernetes.Client, ctx context.Context, layerCli, layerEnv *values.Layer, config *util.RuntimeConfiguration) { +func startScanning(client kubernetes.Client, ctx context.Context, layerCli, layerEnv *values.Layer, config *util.RuntimeConfiguration) { slog.Info("started downscaler") err := scanWorkloads(client, ctx, layerCli, layerEnv, config) diff --git a/deployments/chart/templates/_helpers.tpl b/deployments/chart/templates/_helpers.tpl index 840282f..058f3a8 100644 --- a/deployments/chart/templates/_helpers.tpl +++ b/deployments/chart/templates/_helpers.tpl @@ -267,6 +267,3 @@ Create defined permissions for roles {{- end }} {{- end }} {{- end }} -{{/* -Create defined permissions for lease role -*/}} diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go index 7be036d..39042fd 100644 --- a/internal/api/kubernetes/util.go +++ b/internal/api/kubernetes/util.go @@ -17,9 +17,9 @@ func getConfig(kubeconfig string) (*rest.Config, error) { return clientcmd.BuildConfigFromFlags("", kubeconfig) //nolint: wrapcheck // error gets wrapped in the calling function, so its fine } -// GetCurrentNamespaceFromFile retrieves downscaler namespace from its service account file. -func GetCurrentNamespaceFromFile() (string, error) { - namespaceFile := "/var/run/secrets/kubernetes.io/serviceaccount/namespace" +// GetCurrentNamespace retrieves downscaler namespace from its service account file. 
+func GetCurrentNamespace() (string, error) { + const namespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" namespace, err := os.ReadFile(namespaceFile) if err != nil { From 6b671928382a85db8b968b1fd4cb84e5ee8e15ff Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Mon, 27 Jan 2025 21:42:02 +0100 Subject: [PATCH 32/58] refactor: leader election logic, lease time --- cmd/kubedownscaler/main.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 13a585c..b740bd8 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -85,11 +85,10 @@ func main() { ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - downscalerNamespace, err := kubernetes.GetCurrentNamespace() if err != nil { slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err) + slog.Warn("proceeding without leader election, this may cause multiple instances to conflict when modifying the same resources") startScanning(client, ctx, &layerCli, &layerEnv, config) return @@ -98,10 +97,11 @@ func main() { lease, err := client.CreateLease(leaseName, downscalerNamespace) if err != nil { slog.Warn("failed to create lease", "error", err) - slog.Warn("proceeding without leader election, this may cause multiple instances to conflict when modifying the same resources") - startScanning(client, ctx, &layerCli, &layerEnv, config) + os.Exit(1) } + defer cancel() + sigs := make(chan os.Signal, 1) signal.Notify(sigs, os.Interrupt, syscall.SIGTERM) @@ -113,8 +113,8 @@ func main() { leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ Lock: lease, ReleaseOnCancel: true, - LeaseDuration: 60 * time.Second, - RenewDeadline: 15 * time.Second, + LeaseDuration: 30 * time.Second, + RenewDeadline: 20 * time.Second, RetryPeriod: 5 * time.Second, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { From 29e9610f8cb261f4ab787c9b811447e3602c547f Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:40:12 +0100 Subject: [PATCH 33/58] refactor: added error handling for startScanning --- cmd/kubedownscaler/main.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index b740bd8..a6681cc 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -119,7 +119,10 @@ func main() { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { slog.Info("started leading") - startScanning(client, ctx, &layerCli, &layerEnv, config) + err := startScanning(client, ctx, &layerCli, &layerEnv, config) + if err != nil { + return + } }, OnStoppedLeading: func() { slog.Info("stopped leading") @@ -131,7 +134,7 @@ func main() { }) } -func startScanning(client kubernetes.Client, ctx context.Context, layerCli, layerEnv *values.Layer, config *util.RuntimeConfiguration) { +func startScanning(client kubernetes.Client, ctx context.Context, layerCli, layerEnv *values.Layer, config *util.RuntimeConfiguration) error { slog.Info("started downscaler") err := scanWorkloads(client, ctx, layerCli, layerEnv, config) @@ -142,8 +145,11 @@ func startScanning(client kubernetes.Client, ctx context.Context, layerCli, laye "CliLayer", *layerCli, "EnvLayer", *layerEnv, ) - os.Exit(1) + + return err } + + return nil } // 
scanWorkloads scans over all workloads every scan. From 06ec20b1aab99e502fbdee059daeb6ba44c963d3 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:49:11 +0100 Subject: [PATCH 34/58] refactor: added error handling for startScanning --- cmd/kubedownscaler/main.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index a6681cc..7fc0ca5 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -89,7 +89,12 @@ func main() { if err != nil { slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err) slog.Warn("proceeding without leader election, this may cause multiple instances to conflict when modifying the same resources") - startScanning(client, ctx, &layerCli, &layerEnv, config) + err = startScanning(client, ctx, &layerCli, &layerEnv, config) + + if err != nil { + slog.Error("an error occurred while scanning workloads", "error", err) + os.Exit(1) + } return } @@ -119,9 +124,10 @@ func main() { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { slog.Info("started leading") - err := startScanning(client, ctx, &layerCli, &layerEnv, config) + err = startScanning(client, ctx, &layerCli, &layerEnv, config) if err != nil { - return + slog.Error("an error occurred while scanning workloads", "error", err) + cancel() } }, OnStoppedLeading: func() { @@ -134,7 +140,12 @@ func main() { }) } -func startScanning(client kubernetes.Client, ctx context.Context, layerCli, layerEnv *values.Layer, config *util.RuntimeConfiguration) error { +func startScanning( + client kubernetes.Client, + ctx context.Context, + layerCli, layerEnv *values.Layer, + config *util.RuntimeConfiguration, +) error { slog.Info("started downscaler") err := scanWorkloads(client, ctx, layerCli, layerEnv, config) From 1fb847cbaad0223d5155f0e37e89145f855010d8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 29 Jan 2025 17:49:50 +0000 Subject: [PATCH 35/58] chore: automatically push pre-commit changes --- cmd/kubedownscaler/main.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 7fc0ca5..85f0741 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -90,7 +90,6 @@ func main() { slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err) slog.Warn("proceeding without leader election, this may cause multiple instances to conflict when modifying the same resources") err = startScanning(client, ctx, &layerCli, &layerEnv, config) - if err != nil { slog.Error("an error occurred while scanning workloads", "error", err) os.Exit(1) From 4d7561399f0d30c34ef6bb414fbfde6e0133e826 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:52:16 +0100 Subject: [PATCH 36/58] refactor: linter suggestions for error handling --- cmd/kubedownscaler/main.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 85f0741..8037337 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -89,7 +89,9 @@ func main() { if err != nil { slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err) slog.Warn("proceeding without leader election, this may cause multiple instances to conflict 
when modifying the same resources") + err = startScanning(client, ctx, &layerCli, &layerEnv, config) + if err != nil { slog.Error("an error occurred while scanning workloads", "error", err) os.Exit(1) From a455add21f1e0cd534478ffb0a871577ff174e77 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 29 Jan 2025 17:52:54 +0000 Subject: [PATCH 37/58] chore: automatically push pre-commit changes --- cmd/kubedownscaler/main.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 8037337..9db3adf 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -91,7 +91,6 @@ func main() { slog.Warn("proceeding without leader election, this may cause multiple instances to conflict when modifying the same resources") err = startScanning(client, ctx, &layerCli, &layerEnv, config) - if err != nil { slog.Error("an error occurred while scanning workloads", "error", err) os.Exit(1) From e79c27d0ae2a966bb15d2575cc935963910afdeb Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:55:04 +0100 Subject: [PATCH 38/58] refactor: log message before exiting --- cmd/kubedownscaler/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 9db3adf..6f31748 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -92,7 +92,7 @@ func main() { err = startScanning(client, ctx, &layerCli, &layerEnv, config) if err != nil { - slog.Error("an error occurred while scanning workloads", "error", err) + slog.Error("an error occurred while scanning workloads, exiting", "error", err) os.Exit(1) } From fc6e9e5d1081972c9d9f636f3798db53e5cb7f47 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Thu, 30 Jan 2025 20:32:52 +0100 Subject: [PATCH 39/58] refactor: small refactoring for log and errors --- cmd/kubedownscaler/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 6f31748..0571743 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -132,6 +132,7 @@ func main() { }, OnStoppedLeading: func() { slog.Info("stopped leading") + os.Exit(1) }, OnNewLeader: func(identity string) { slog.Info("new leader elected", "identity", identity) From 55e33e0b4077d3b777ad75d46c78771e61188ad7 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Thu, 30 Jan 2025 20:34:43 +0100 Subject: [PATCH 40/58] refactor: small refactoring for log and errors --- cmd/kubedownscaler/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 0571743..a275c2f 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -158,7 +158,7 @@ func startScanning( "EnvLayer", *layerEnv, ) - return err + return fmt.Errorf("failed to scan over workloads: %w", err) } return nil From e41d8942755c2f134d4b4c84fb860a3f50d73483 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:13:30 +0100 Subject: [PATCH 41/58] refactor: deleted error log from startscanning --- cmd/kubedownscaler/main.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index a275c2f..f07eb7a 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ 
-151,13 +151,6 @@ func startScanning( err := scanWorkloads(client, ctx, layerCli, layerEnv, config) if err != nil { - slog.Error("failed to scan over workloads", - "error", err, - "config", config, - "CliLayer", *layerCli, - "EnvLayer", *layerEnv, - ) - return fmt.Errorf("failed to scan over workloads: %w", err) } From 2ef6154404d2e7f9f12040f443acac85b7b61d5a Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:22:58 +0100 Subject: [PATCH 42/58] refactor: cancel instead of exit for onstoppedleading --- cmd/kubedownscaler/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index f07eb7a..b91f8a7 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -132,7 +132,7 @@ func main() { }, OnStoppedLeading: func() { slog.Info("stopped leading") - os.Exit(1) + cancel() }, OnNewLeader: func(identity string) { slog.Info("new leader elected", "identity", identity) From 3294c3fa709e2704ae685da1ea3217f18479f823 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Tue, 4 Feb 2025 21:01:47 +0100 Subject: [PATCH 43/58] feat: added leader election argument --- cmd/kubedownscaler/main.go | 48 +++++++++++++++++++++++-------------- internal/pkg/util/config.go | 8 +++++++ 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index b91f8a7..006efc0 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -34,17 +34,18 @@ const ( func main() { // set defaults for runtime configuration config := &util.RuntimeConfiguration{ - DryRun: false, - Debug: false, - Once: false, - Interval: defaultInterval, - IncludeNamespaces: nil, - IncludeResources: []string{"deployments"}, - ExcludeNamespaces: util.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")}, - ExcludeWorkloads: nil, - IncludeLabels: nil, - TimeAnnotation: "", - Kubeconfig: "", + DryRun: false, + Debug: false, + Once: false, + LeaderElectionEnabled: false, + Interval: defaultInterval, + IncludeNamespaces: nil, + IncludeResources: []string{"deployments"}, + ExcludeNamespaces: util.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")}, + ExcludeWorkloads: nil, + IncludeLabels: nil, + TimeAnnotation: "", + Kubeconfig: "", } layerCli := values.NewLayer() @@ -88,14 +89,10 @@ func main() { downscalerNamespace, err := kubernetes.GetCurrentNamespace() if err != nil { slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err) - slog.Warn("proceeding without leader election, this may cause multiple instances to conflict when modifying the same resources") - - err = startScanning(client, ctx, &layerCli, &layerEnv, config) - if err != nil { - slog.Error("an error occurred while scanning workloads, exiting", "error", err) - os.Exit(1) - } + } + if !config.LeaderElectionEnabled { + runWithoutLeaderElection(client, ctx, &layerCli, &layerEnv, config) return } @@ -141,6 +138,21 @@ func main() { }) } +func runWithoutLeaderElection( + client kubernetes.Client, + ctx context.Context, + layerCli, layerEnv *values.Layer, + config *util.RuntimeConfiguration, +) { + slog.Warn("proceeding without leader election, this may cause multiple downscaler instances to conflict when modifying the same resources") + + err := startScanning(client, ctx, layerCli, layerEnv, config) + if err != nil { 
+ slog.Error("an error occurred while scanning workloads, exiting", "error", err) + os.Exit(1) + } +} + func startScanning( client kubernetes.Client, ctx context.Context, diff --git a/internal/pkg/util/config.go b/internal/pkg/util/config.go index ab69bd6..8c4de64 100644 --- a/internal/pkg/util/config.go +++ b/internal/pkg/util/config.go @@ -14,6 +14,8 @@ type RuntimeConfiguration struct { Debug bool // Once sets if the scan should only run once. Once bool + // LeaderElectionEnabled sets if leader election should be performed. + LeaderElectionEnabled bool // Interval sets how long to wait between scans. Interval time.Duration // IncludeNamespaces sets the list of namespaces to restrict the downscaler to. @@ -52,6 +54,12 @@ func (c *RuntimeConfiguration) ParseConfigFlags() { false, "run scan only once (default: false)", ) + flag.BoolVar( + &c.Once, + "leader-election-enabled", + false, + "leader election is not enabled (default: false)", + ) flag.Var( (*DurationValue)(&c.Interval), "interval", From 5322c6db4a6d14f93948185b52aeced2805b8e28 Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Tue, 4 Feb 2025 21:07:55 +0100 Subject: [PATCH 44/58] refactor: chart automatically enables leader election when replicas are greater than 1 --- deployments/chart/templates/deployment.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deployments/chart/templates/deployment.yaml b/deployments/chart/templates/deployment.yaml index ca509b7..4e85000 100644 --- a/deployments/chart/templates/deployment.yaml +++ b/deployments/chart/templates/deployment.yaml @@ -23,6 +23,9 @@ spec: {{- with .Values.arguments }} {{- toYaml . | nindent 10 }} {{- end }} + {{- if gt .Values.replicaCount 1 }} + - --leader-election-enabled + {{- end }} {{- if .Values.constrainedDownscaler }} - --namespace={{ join "," .Values.constrainedNamespaces }} {{- end }} From 2fef53da3a131a80bc6cd11e1dc7a8603ce5084a Mon Sep 17 00:00:00 2001 From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com> Date: Tue, 4 Feb 2025 21:49:38 +0100 Subject: [PATCH 45/58] refactor: main function --- cmd/kubedownscaler/main.go | 89 +++++++++++++++++++++++++------------- 1 file changed, 59 insertions(+), 30 deletions(-) diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go index 006efc0..3241847 100644 --- a/cmd/kubedownscaler/main.go +++ b/cmd/kubedownscaler/main.go @@ -32,36 +32,11 @@ const ( ) func main() { - // set defaults for runtime configuration - config := &util.RuntimeConfiguration{ - DryRun: false, - Debug: false, - Once: false, - LeaderElectionEnabled: false, - Interval: defaultInterval, - IncludeNamespaces: nil, - IncludeResources: []string{"deployments"}, - ExcludeNamespaces: util.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")}, - ExcludeWorkloads: nil, - IncludeLabels: nil, - TimeAnnotation: "", - Kubeconfig: "", - } + // set defaults for runtime configuration and layers + config := initializeConfig() + layerCli, layerEnv := initializeLayers() - layerCli := values.NewLayer() - layerEnv := values.NewLayer() - - // set defaults for layers - layerCli.GracePeriod = defaultGracePeriod - layerCli.DownscaleReplicas = defaultDownscaleReplicas - - config.ParseConfigFlags() - - layerCli.ParseLayerFlags() - - flag.Parse() - - err := layerEnv.GetLayerFromEnv() + err := setupEnv(&layerEnv) if err != nil { slog.Error("failed to get layer from env", "error", err) os.Exit(1) @@ -96,6 +71,60 @@ func main() { return } + runWithLeaderElection(client, 
+}
+
+func initializeConfig() *util.RuntimeConfiguration {
+	config := &util.RuntimeConfiguration{
+		DryRun:                false,
+		Debug:                 false,
+		Once:                  false,
+		LeaderElectionEnabled: false,
+		Interval:              defaultInterval,
+		IncludeNamespaces:     nil,
+		IncludeResources:      []string{"deployments"},
+		ExcludeNamespaces:     util.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")},
+		ExcludeWorkloads:      nil,
+		IncludeLabels:         nil,
+		TimeAnnotation:        "",
+		Kubeconfig:            "",
+	}
+	config.ParseConfigFlags()
+
+	return config
+}
+
+func initializeLayers() (values.Layer, values.Layer) {
+	// Initialize layers
+	layerCli := values.NewLayer()
+	layerEnv := values.NewLayer()
+
+	// Configure layerCli
+	layerCli.GracePeriod = defaultGracePeriod
+	layerCli.DownscaleReplicas = defaultDownscaleReplicas
+	layerCli.ParseLayerFlags()
+	flag.Parse()
+
+	return layerCli, layerEnv
+}
+
+func setupEnv(layerEnv *values.Layer) error {
+	err := layerEnv.GetLayerFromEnv()
+	if err != nil {
+		return fmt.Errorf("failed to get layer from env: %w", err)
+	}
+
+	return nil
+}
+
+func runWithLeaderElection(
+	client kubernetes.Client,
+	downscalerNamespace string,
+	cancel context.CancelFunc,
+	ctx context.Context,
+	layerCli, layerEnv *values.Layer,
+	config *util.RuntimeConfiguration,
+) {
 	lease, err := client.CreateLease(leaseName, downscalerNamespace)
 	if err != nil {
 		slog.Warn("failed to create lease", "error", err)
 		os.Exit(1)
 	}
@@ -121,7 +150,7 @@ func main() {
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: func(ctx context.Context) {
 				slog.Info("started leading")
-				err = startScanning(client, ctx, &layerCli, &layerEnv, config)
+				err = startScanning(client, ctx, layerCli, layerEnv, config)
 				if err != nil {
 					slog.Error("an error occurred while scanning workloads", "error", err)
 					cancel()

From 96b6fcad2b0cc7147dc98a020994f4d9947a21b3 Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Thu, 6 Feb 2025 20:09:45 +0100
Subject: [PATCH 46/58] refactor: helm chart for lease role and argument

---
 deployments/chart/templates/_helpers.tpl    | 7 +++++++
 deployments/chart/templates/deployment.yaml | 2 +-
 deployments/chart/templates/leaserole.yaml  | 2 ++
 deployments/chart/values.yaml               | 2 ++
 4 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/deployments/chart/templates/_helpers.tpl b/deployments/chart/templates/_helpers.tpl
index 058f3a8..b8faf00 100644
--- a/deployments/chart/templates/_helpers.tpl
+++ b/deployments/chart/templates/_helpers.tpl
@@ -23,6 +23,13 @@ Create chart name and version as used by the chart label.
 {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
 {{- end }}
 
+{{/*
+Enable leader election when replicaCount is greater than 1 or when leaderElection is explicitly set; renders "true" when enabled and an empty (falsy) string otherwise.
+*/}}
+{{- define "go-kube-downscaler.leaderElection" -}}
+{{- if or (gt (.Values.replicaCount | int) 1) .Values.leaderElection -}}true{{- end -}}
+{{- end }}
+
 {{/*
 Common labels
 */}}
diff --git a/deployments/chart/templates/deployment.yaml b/deployments/chart/templates/deployment.yaml
index 4e85000..4ca8064 100644
--- a/deployments/chart/templates/deployment.yaml
+++ b/deployments/chart/templates/deployment.yaml
@@ -23,7 +23,7 @@ spec:
         {{- with .Values.arguments }}
         {{- toYaml . | nindent 10 }}
         {{- end }}
-        {{- if gt .Values.replicaCount 1 }}
+        {{- if include "go-kube-downscaler.leaderElection" . 
           - --leader-election-enabled
           {{- end }}
           {{- if .Values.constrainedDownscaler }}
diff --git a/deployments/chart/templates/leaserole.yaml b/deployments/chart/templates/leaserole.yaml
index 93d4701..aedf278 100644
--- a/deployments/chart/templates/leaserole.yaml
+++ b/deployments/chart/templates/leaserole.yaml
@@ -1,3 +1,4 @@
+{{- if include "go-kube-downscaler.leaderElection" . }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
@@ -28,3 +29,4 @@ roleRef:
   kind: Role
   name: {{ include "go-kube-downscaler.fullname" . }}-lease-role
   apiGroup: rbac.authorization.k8s.io
+{{- end }}
\ No newline at end of file
diff --git a/deployments/chart/values.yaml b/deployments/chart/values.yaml
index 0ecf8e8..4f0df3b 100644
--- a/deployments/chart/values.yaml
+++ b/deployments/chart/values.yaml
@@ -7,6 +7,8 @@ image:
 arguments:
   # - --interval=60
 
+leaderElection: false
+
 includedResources:
   - deployments
   # - statefulsets

From 54c1165f133e79988c4de44711d360f1c6d0e1e6 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Thu, 6 Feb 2025 19:10:34 +0000
Subject: [PATCH 47/58] chore: automatically push pre-commit changes

---
 deployments/chart/templates/leaserole.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deployments/chart/templates/leaserole.yaml b/deployments/chart/templates/leaserole.yaml
index aedf278..d1f0d33 100644
--- a/deployments/chart/templates/leaserole.yaml
+++ b/deployments/chart/templates/leaserole.yaml
@@ -29,4 +29,4 @@ roleRef:
   kind: Role
   name: {{ include "go-kube-downscaler.fullname" . }}-lease-role
   apiGroup: rbac.authorization.k8s.io
-{{- end }}
\ No newline at end of file
+{{- end }}

From 533a507d564acda1bd000da66a7d7a6e5330329c Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Thu, 6 Feb 2025 20:33:47 +0100
Subject: [PATCH 48/58] refactor: logic to separate run with and without leader election

---
 cmd/kubedownscaler/main.go  | 89 ++++++++++++++---------------------
 internal/pkg/util/config.go | 10 +++++-----
 2 files changed, 43 insertions(+), 56 deletions(-)

diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go
index 3241847..487a659 100644
--- a/cmd/kubedownscaler/main.go
+++ b/cmd/kubedownscaler/main.go
@@ -32,16 +32,45 @@ const (
 )
 
 func main() {
-	// set defaults for runtime configuration and layers
-	config := initializeConfig()
-	layerCli, layerEnv := initializeLayers()
+	// set defaults for runtime configuration
+	config := &util.RuntimeConfiguration{
+		DryRun:            false,
+		Debug:             false,
+		Once:              false,
+		Interval:          defaultInterval,
+		IncludeNamespaces: nil,
+		IncludeResources:  []string{"deployments"},
+		ExcludeNamespaces: util.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")},
+		ExcludeWorkloads:  nil,
+		IncludeLabels:     nil,
+		TimeAnnotation:    "",
+		Kubeconfig:        "",
+	}
+
+	config.ParseConfigFlags()
+
+	err := config.ParseConfigEnvVars()
+	if err != nil {
+		slog.Error("failed to parse env vars for config", "error", err)
+		os.Exit(1)
+	}
+
+	layerCli := values.NewLayer()
+	layerEnv := values.NewLayer()
 
-	err := setupEnv(&layerEnv)
+	err = layerEnv.GetLayerFromEnv()
 	if err != nil {
 		slog.Error("failed to get layer from env", "error", err)
 		os.Exit(1)
 	}
 
+	// set defaults for layers
+	layerCli.GracePeriod = defaultGracePeriod
+	layerCli.DownscaleReplicas = defaultDownscaleReplicas
+	layerCli.ParseLayerFlags()
+
+	flag.Parse()
+
 	if config.Debug || config.DryRun {
 		slog.SetLogLoggerLevel(slog.LevelDebug)
 	}
@@ -61,60 +90,18 @@ func main() {
 
 	ctx, cancel := context.WithCancel(context.Background())
-	downscalerNamespace, err := kubernetes.GetCurrentNamespace()
-	if err != nil {
-		slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err)
-	}
-
-	if !config.LeaderElectionEnabled {
+	if !config.LeaderElection {
 		runWithoutLeaderElection(client, ctx, &layerCli, &layerEnv, config)
 		return
 	}
 
-	runWithLeaderElection(client, downscalerNamespace, cancel, ctx, &layerCli, &layerEnv, config)
-}
-
-func initializeConfig() *util.RuntimeConfiguration {
-	config := &util.RuntimeConfiguration{
-		DryRun:                false,
-		Debug:                 false,
-		Once:                  false,
-		LeaderElectionEnabled: false,
-		Interval:              defaultInterval,
-		IncludeNamespaces:     nil,
-		IncludeResources:      []string{"deployments"},
-		ExcludeNamespaces:     util.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")},
-		ExcludeWorkloads:      nil,
-		IncludeLabels:         nil,
-		TimeAnnotation:        "",
-		Kubeconfig:            "",
-	}
-	config.ParseConfigFlags()
-
-	return config
-}
-
-func initializeLayers() (values.Layer, values.Layer) {
-	// Initialize layers
-	layerCli := values.NewLayer()
-	layerEnv := values.NewLayer()
-
-	// Configure layerCli
-	layerCli.GracePeriod = defaultGracePeriod
-	layerCli.DownscaleReplicas = defaultDownscaleReplicas
-	layerCli.ParseLayerFlags()
-	flag.Parse()
-
-	return layerCli, layerEnv
-}
-
-func setupEnv(layerEnv *values.Layer) error {
-	err := layerEnv.GetLayerFromEnv()
+	downscalerNamespace, err := kubernetes.GetCurrentNamespace()
 	if err != nil {
-		return fmt.Errorf("failed to get layer from env: %w", err)
+		slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err)
+		runWithoutLeaderElection(client, ctx, &layerCli, &layerEnv, config)
 	}
 
-	return nil
+	runWithLeaderElection(client, downscalerNamespace, cancel, ctx, &layerCli, &layerEnv, config)
 }
 
 func runWithLeaderElection(
diff --git a/internal/pkg/util/config.go b/internal/pkg/util/config.go
index 8c4de64..ac04420 100644
--- a/internal/pkg/util/config.go
+++ b/internal/pkg/util/config.go
@@ -14,8 +14,8 @@ type RuntimeConfiguration struct {
 	Debug bool
 	// Once sets if the scan should only run once.
 	Once bool
-	// LeaderElectionEnabled sets if leader election should be performed.
-	LeaderElectionEnabled bool
+	// LeaderElection sets if leader election should be performed.
+	LeaderElection bool
 	// Interval sets how long to wait between scans.
 	Interval time.Duration
 	// IncludeNamespaces sets the list of namespaces to restrict the downscaler to.
@@ -56,9 +56,9 @@ func (c *RuntimeConfiguration) ParseConfigFlags() {
 	)
 	flag.BoolVar(
-		&c.Once,
-		"leader-election-enabled",
+		&c.LeaderElection,
+		"leader-election",
 		false,
-		"leader election is not enabled (default: false)",
+		"enables leader election (default: false)",
 	)
 	flag.Var(
 		(*DurationValue)(&c.Interval),
 		"interval",

From d27448b130a6839c184b3fffd67bff83dcd075a8 Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Fri, 7 Feb 2025 15:24:34 +0100
Subject: [PATCH 49/58] refactor: helm chart logic and arguments for leader election

---
 deployments/chart/templates/_helpers.tpl    | 6 ++++--
 deployments/chart/templates/deployment.yaml | 2 +-
 deployments/chart/values.yaml               | 3 +--
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/deployments/chart/templates/_helpers.tpl b/deployments/chart/templates/_helpers.tpl
index b8faf00..1024ff1 100644
--- a/deployments/chart/templates/_helpers.tpl
+++ b/deployments/chart/templates/_helpers.tpl
@@ -24,10 +24,12 @@ Create chart name and version as used by the chart label.
 {{- end }}
 
 {{/*
-Check if replicaCount > 1 or if leader election is enabled. If replicaCount is 1, leader election is overridden to true.
+If replicaCount > leader election is enabled by default.
 */}}
 {{- define "go-kube-downscaler.leaderElection" -}}
-{{- and (.Values.replicaCount | int | gt 1) (or .Values.leaderElection false) }}
+{{- if gt (.Values.replicaCount | int) 1 -}}
+true
+{{- end -}}
 {{- end }}
 
 {{/*
diff --git a/deployments/chart/templates/deployment.yaml b/deployments/chart/templates/deployment.yaml
index 4ca8064..20d4b30 100644
--- a/deployments/chart/templates/deployment.yaml
+++ b/deployments/chart/templates/deployment.yaml
@@ -24,7 +24,7 @@ spec:
           {{- toYaml . | nindent 10 }}
           {{- end }}
           {{- if include "go-kube-downscaler.leaderElection" . }}
-          - --leader-election-enabled
+          - --leader-election
           {{- end }}
           {{- if .Values.constrainedDownscaler }}
           - --namespace={{ join "," .Values.constrainedNamespaces }}
diff --git a/deployments/chart/values.yaml b/deployments/chart/values.yaml
index 4f0df3b..41b87f1 100644
--- a/deployments/chart/values.yaml
+++ b/deployments/chart/values.yaml
@@ -1,4 +1,5 @@
 replicaCount: 1
+# If replicaCount is greater than 1, the leader election is enabled by default
 
 image:
   repository: ghcr.io/caas-team/gokubedownscaler
@@ -7,8 +8,6 @@ image:
 arguments:
   # - --interval=60
 
-leaderElection: false
-
 includedResources:
   - deployments
   # - statefulsets

From a3a95dcfa990270e5d269c5bb68f2aa4f6bb0bbe Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Mon, 10 Feb 2025 13:16:31 +0100
Subject: [PATCH 50/58] refactor: helm chart comments

---
 deployments/chart/templates/_helpers.tpl | 2 +-
 deployments/chart/values.yaml            | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/deployments/chart/templates/_helpers.tpl b/deployments/chart/templates/_helpers.tpl
index 1024ff1..91675b5 100644
--- a/deployments/chart/templates/_helpers.tpl
+++ b/deployments/chart/templates/_helpers.tpl
@@ -24,7 +24,7 @@ Create chart name and version as used by the chart label.
 {{- end }}
 
 {{/*
-If replicaCount > leader election is enabled by default.
+If replicaCount is greater than 1, leader election is enabled by default.
 */}}
 {{- define "go-kube-downscaler.leaderElection" -}}
 {{- if gt (.Values.replicaCount | int) 1 -}}
diff --git a/deployments/chart/values.yaml b/deployments/chart/values.yaml
index 41b87f1..5d60115 100644
--- a/deployments/chart/values.yaml
+++ b/deployments/chart/values.yaml
@@ -1,5 +1,5 @@
-replicaCount: 1
 # If replicaCount is greater than 1, the leader election is enabled by default
+replicaCount: 1
 
 image:
   repository: ghcr.io/caas-team/gokubedownscaler

From 8611b42898189b3aeacc5835cfc34b35aab1670a Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Mon, 10 Feb 2025 13:20:54 +0100
Subject: [PATCH 51/58] refactor: exiting when namespace couldn't be retrieved

---
 cmd/kubedownscaler/main.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go
index 487a659..f6c8a8c 100644
--- a/cmd/kubedownscaler/main.go
+++ b/cmd/kubedownscaler/main.go
@@ -98,7 +98,7 @@ func main() {
 	downscalerNamespace, err := kubernetes.GetCurrentNamespace()
 	if err != nil {
 		slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err)
-		runWithoutLeaderElection(client, ctx, &layerCli, &layerEnv, config)
+		os.Exit(1)
 	}
 
 	runWithLeaderElection(client, downscalerNamespace, cancel, ctx, &layerCli, &layerEnv, config)

From 8cf28d79747f1984e104490c6a7cc5d3d6f5ce6b Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Tue, 11 Feb 2025 13:16:40 +0100
Subject: [PATCH 52/58] refactor: helm chart typos

---
 deployments/chart/templates/leaserole.yaml | 2 +-
 deployments/chart/values.yaml              | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/deployments/chart/templates/leaserole.yaml b/deployments/chart/templates/leaserole.yaml
index d1f0d33..f44f6da 100644
--- a/deployments/chart/templates/leaserole.yaml
+++ b/deployments/chart/templates/leaserole.yaml
@@ -1,4 +1,4 @@
-{{- if include "go-kube-downscaler.leaderElection" . }}
+{{- if include "go-kube-downscaler.leaderElection" . -}}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
diff --git a/deployments/chart/values.yaml b/deployments/chart/values.yaml
index 5d60115..2fdf014 100644
--- a/deployments/chart/values.yaml
+++ b/deployments/chart/values.yaml
@@ -1,4 +1,4 @@
-# If replicaCount is greater than 1, the leader election is enabled by default
+# If replicaCount is greater than 1, leader election is enabled by default
 replicaCount: 1
 
 image:

From b019c0cba364125c68c3a3f1064c4a5cc5160600 Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Tue, 11 Feb 2025 13:18:21 +0100
Subject: [PATCH 53/58] refactor: improved log messages and log level

---
 cmd/kubedownscaler/main.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go
index f6c8a8c..091fe51 100644
--- a/cmd/kubedownscaler/main.go
+++ b/cmd/kubedownscaler/main.go
@@ -97,7 +97,7 @@ func main() {
 
 	downscalerNamespace, err := kubernetes.GetCurrentNamespace()
 	if err != nil {
-		slog.Warn("couldn't get namespace or running outside of cluster; skipping leader election", "error", err)
+		slog.Error("couldn't get namespace or running outside of cluster", "error", err)
 		os.Exit(1)
 	}
 
@@ -114,7 +114,7 @@ func runWithLeaderElection(
 ) {
 	lease, err := client.CreateLease(leaseName, downscalerNamespace)
 	if err != nil {
-		slog.Warn("failed to create lease", "error", err)
+		slog.Error("failed to create lease", "error", err)
 		os.Exit(1)
 	}
 
@@ -160,7 +160,7 @@ func runWithoutLeaderElection(
 	layerCli, layerEnv *values.Layer,
 	config *util.RuntimeConfiguration,
 ) {
-	slog.Warn("proceeding without leader election, this may cause multiple downscaler instances to conflict when modifying the same resources")
+	slog.Warn("proceeding without leader election; this could cause errors when running with multiple replicas")
 
 	err := startScanning(client, ctx, layerCli, layerEnv, config)
 	if err != nil {

From 47f7eb3f432b4a9fb7c2d2d09b55ec055ddc5e09 Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Tue, 11 Feb 2025 13:25:07 +0100
Subject: [PATCH 54/58] refactor: getCurrentNamespace now called inside CreateLease function

---
 cmd/kubedownscaler/main.go        | 11 ++---------
 internal/api/kubernetes/client.go | 10 ++++++++--
 internal/api/kubernetes/util.go   |  4 ++--
 3 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go
index 091fe51..75f2aae 100644
--- a/cmd/kubedownscaler/main.go
+++ b/cmd/kubedownscaler/main.go
@@ -95,24 +95,17 @@ func main() {
 		return
 	}
 
-	downscalerNamespace, err := kubernetes.GetCurrentNamespace()
-	if err != nil {
-		slog.Error("couldn't get namespace or running outside of cluster", "error", err)
-		os.Exit(1)
-	}
-
-	runWithLeaderElection(client, downscalerNamespace, cancel, ctx, &layerCli, &layerEnv, config)
+	runWithLeaderElection(client, cancel, ctx, &layerCli, &layerEnv, config)
 }
 
 func runWithLeaderElection(
 	client kubernetes.Client,
-	downscalerNamespace string,
 	cancel context.CancelFunc,
 	ctx context.Context,
 	layerCli, layerEnv *values.Layer,
 	config *util.RuntimeConfiguration,
 ) {
-	lease, err := client.CreateLease(leaseName, downscalerNamespace)
+	lease, err := client.CreateLease(leaseName)
 	if err != nil {
 		slog.Error("failed to create lease", "error", err)
 		os.Exit(1)
diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go
index 9054880..e7f8e67 100644
--- a/internal/api/kubernetes/client.go
+++ b/internal/api/kubernetes/client.go
@@ -36,7 +36,7 @@ type Client interface {
 	// UpscaleWorkload upscales the workload to the original replicas
 	UpscaleWorkload(workload scalable.Workload, ctx context.Context) error
 	// CreateLease creates a new lease for the downscaler
-	CreateLease(leaseName, leaseNamespace string) (*resourcelock.LeaseLock, error)
+	CreateLease(leaseName string) (*resourcelock.LeaseLock, error)
 	// addWorkloadEvent creates a new event on the workload
 	addWorkloadEvent(eventType string, reason string, id string, message string, workload scalable.Workload, ctx context.Context) error
 }
@@ -245,13 +245,19 @@ func (c client) addWorkloadEvent(eventType, reason, identifier, message string,
 	return nil
 }
 
-func (c client) CreateLease(leaseName, leaseNamespace string) (*resourcelock.LeaseLock, error) {
+func (c client) CreateLease(leaseName string) (*resourcelock.LeaseLock, error) {
 	hostname, err := os.Hostname()
 	if err != nil {
 		slog.Error("failed to get hostname", "error", err)
 		return nil, fmt.Errorf("failed to get hostname: %w", err)
 	}
 
+	leaseNamespace, err := getCurrentNamespace()
+	if err != nil {
+		slog.Error("couldn't get namespace or running outside of cluster", "error", err)
+		os.Exit(1)
+	}
+
 	lease := &resourcelock.LeaseLock{
 		LeaseMeta: metav1.ObjectMeta{
 			Name:      leaseName,
diff --git a/internal/api/kubernetes/util.go b/internal/api/kubernetes/util.go
index 39042fd..8da09e6 100644
--- a/internal/api/kubernetes/util.go
+++ b/internal/api/kubernetes/util.go
@@ -18,7 +18,7 @@ func getConfig(kubeconfig string) (*rest.Config, error) {
 }
 
-// GetCurrentNamespace retrieves downscaler namespace from its service account file.
-func GetCurrentNamespace() (string, error) {
+// getCurrentNamespace retrieves the downscaler namespace from its service account file.
+func getCurrentNamespace() (string, error) {
 	const namespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
 
 	namespace, err := os.ReadFile(namespaceFile)

From 64e677915da20ee1a6bb10e4dcabec7829dcd46e Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Tue, 11 Feb 2025 13:28:39 +0100
Subject: [PATCH 55/58] refactor: merged scanWorkloads into startScanning

---
 cmd/kubedownscaler/main.go | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go
index 75f2aae..dc75918 100644
--- a/cmd/kubedownscaler/main.go
+++ b/cmd/kubedownscaler/main.go
@@ -170,21 +170,6 @@ func startScanning(
 ) error {
 	slog.Info("started downscaler")
 
-	err := scanWorkloads(client, ctx, layerCli, layerEnv, config)
-	if err != nil {
-		return fmt.Errorf("failed to scan over workloads: %w", err)
-	}
-
-	return nil
-}
-
-// scanWorkloads scans over all workloads every scan.
-func scanWorkloads(
-	client kubernetes.Client,
-	ctx context.Context,
-	layerCli, layerEnv *values.Layer,
-	config *util.RuntimeConfiguration,
-) error {
 
 	for {
 		slog.Info("scanning workloads")

From a46d0ec9553c6110f60ff80c7c19b1cf253e1ec6 Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Tue, 11 Feb 2025 17:37:30 +0100
Subject: [PATCH 56/58] refactor: returning error when namespace can't be retrieved

---
 internal/api/kubernetes/client.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go
index e7f8e67..e352e44 100644
--- a/internal/api/kubernetes/client.go
+++ b/internal/api/kubernetes/client.go
@@ -255,7 +255,7 @@ func (c client) CreateLease(leaseName string) (*resourcelock.LeaseLock, error) {
 	leaseNamespace, err := getCurrentNamespace()
 	if err != nil {
 		slog.Error("couldn't get namespace or running outside of cluster", "error", err)
-		os.Exit(1)
+		return nil, fmt.Errorf("couldn't get namespace: %w", err)
 	}
 
 	lease := &resourcelock.LeaseLock{

From ca13f84219bc1d5ba21eb361d62e33417a26271f Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Wed, 12 Feb 2025 17:42:44 +0100
Subject: [PATCH 57/58] refactor: returning error from createLease function

---
 internal/api/kubernetes/client.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go
index e352e44..b4b5b7c 100644
--- a/internal/api/kubernetes/client.go
+++ b/internal/api/kubernetes/client.go
@@ -254,8 +254,7 @@ func (c client) CreateLease(leaseName string) (*resourcelock.LeaseLock, error) {
 
 	leaseNamespace, err := getCurrentNamespace()
 	if err != nil {
-		slog.Error("couldn't get namespace or running outside of cluster", "error", err)
-		return nil, fmt.Errorf("couldn't get namespace: %w", err)
+		return nil, fmt.Errorf("failed to get namespace or running outside of cluster: %w", err)
 	}
 
 	lease := &resourcelock.LeaseLock{

From 2e22c50f9acf608fd1f831b903f54ab60db08d57 Mon Sep 17 00:00:00 2001
From: Samuel Esposito <40698384+samuel-esp@users.noreply.github.com>
Date: Thu, 13 Feb 2025 08:51:24 +0100
Subject: [PATCH 58/58] refactor: moved defer cancel, removed log error

---
 cmd/kubedownscaler/main.go        | 4 ++--
 internal/api/kubernetes/client.go | 1 -
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/cmd/kubedownscaler/main.go b/cmd/kubedownscaler/main.go
index dc75918..415d913 100644
--- a/cmd/kubedownscaler/main.go
+++ b/cmd/kubedownscaler/main.go
@@ -90,6 +90,8 @@ func main() {
 
 	ctx, cancel := context.WithCancel(context.Background())
 
+	defer cancel()
+
 	if !config.LeaderElection {
 		runWithoutLeaderElection(client, ctx, &layerCli, &layerEnv, config)
 		return
@@ -111,8 +113,6 @@ func runWithLeaderElection(
 		os.Exit(1)
 	}
 
-	defer cancel()
-
 	sigs := make(chan os.Signal, 1)
 	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
 
diff --git a/internal/api/kubernetes/client.go b/internal/api/kubernetes/client.go
index b4b5b7c..df8a059 100644
--- a/internal/api/kubernetes/client.go
+++ b/internal/api/kubernetes/client.go
@@ -248,7 +248,6 @@
 
 func (c client) CreateLease(leaseName string) (*resourcelock.LeaseLock, error) {
 	hostname, err := os.Hostname()
 	if err != nil {
-		slog.Error("failed to get hostname", "error", err)
 		return nil, fmt.Errorf("failed to get hostname: %w", err)
 	}
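
As a closing reference, the leader-election flow this series converges on can be condensed into one self-contained program built on client-go's leaderelection package: take the pod hostname as the lock identity, read the namespace from the service account file, wrap both in a LeaseLock, and block in RunOrDie until leadership is acquired. This is a minimal sketch for orientation, not the repository's exact final code; the RenewDeadline and RetryPeriod values are illustrative assumptions, and the scanning loop is reduced to a placeholder.

// Minimal sketch of the leader-election flow built in this series.
// Assumes in-cluster execution; timing values other than LeaseDuration
// are illustrative, not the repository's actual settings.
package main

import (
	"context"
	"log/slog"
	"os"
	"os/signal"
	"syscall"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	config, err := rest.InClusterConfig()
	if err != nil {
		slog.Error("failed to load in-cluster config", "error", err)
		os.Exit(1)
	}

	// The pod's hostname doubles as the lock identity, as in the patches above.
	hostname, err := os.Hostname()
	if err != nil {
		slog.Error("failed to get hostname", "error", err)
		os.Exit(1)
	}

	// Same service account file the series reads the namespace from.
	namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
	if err != nil {
		slog.Error("failed to get namespace or running outside of cluster", "error", err)
		os.Exit(1)
	}

	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "downscaler-lease", Namespace: string(namespace)},
		Client:     kubernetes.NewForConfigOrDie(config).CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: hostname},
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Cancelling the context releases the lease (ReleaseOnCancel), so a
	// standby replica can take over without waiting out LeaseDuration.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sigs
		cancel()
	}()

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,
		LeaseDuration:   30 * time.Second, // matches the series' leaseDuration constant
		RenewDeadline:   20 * time.Second, // assumed value for illustration
		RetryPeriod:     5 * time.Second,  // assumed value for illustration
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				slog.Info("started leading")
				<-ctx.Done() // the scanning loop would run here instead
			},
			OnStoppedLeading: func() {
				slog.Info("stopped leading")
			},
		},
	})
}

Because ReleaseOnCancel is set and the signal handler cancels the context, a terminating leader gives up the lease immediately instead of leaving the standby replicas to wait out the full 30-second lease duration, which is the same fast-failover behavior the signal handling in the patches above is after. Compared with the hand-rolled lease create/renew/delete approach the series started from, RunOrDie also handles renewal and retry jitter, which is why the later patches converge on it.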