Implement caching rules (#272)
* code cleanup

Sorting imports.

* add label selector for nats-manager

Adds a label selector for the nats-manager instance.

* add label selector for created-by: nats-manager

* add cache options

* add client options

* add caching rules via client and cache to main

* move label package from internal to pkg

* add managed-by label

* clean up labels

* fix unit test

* add statefulset

* fix unit tests

* remove client

* use labelselector for more resources

* fix comment

* remove client
friedrichwilken authored Jan 4, 2024
1 parent 0aeccd5 commit 6fd3d93
Showing 5 changed files with 272 additions and 13 deletions.
25 changes: 12 additions & 13 deletions cmd/main.go
@@ -20,19 +20,14 @@ import (
"flag"
"os"

"go.uber.org/zap"
"go.uber.org/zap/zapcore"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/kyma-project/nats-manager/pkg/env"
"github.com/kyma-project/nats-manager/pkg/k8s"
"github.com/kyma-project/nats-manager/pkg/k8s/chart"
"github.com/kyma-project/nats-manager/pkg/manager"

// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"

"go.uber.org/zap"
"go.uber.org/zap/zapcore"
apiclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -42,10 +37,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"

apiclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"

natsv1alpha1 "github.com/kyma-project/nats-manager/api/v1alpha1"
controllercache "github.com/kyma-project/nats-manager/internal/controller/cache"
natscontroller "github.com/kyma-project/nats-manager/internal/controller/nats"
"github.com/kyma-project/nats-manager/pkg/env"
"github.com/kyma-project/nats-manager/pkg/k8s"
"github.com/kyma-project/nats-manager/pkg/k8s/chart"
"github.com/kyma-project/nats-manager/pkg/manager"
)

const defaultMetricsPort = 9443
@@ -115,8 +113,6 @@ func main() { //nolint:funlen // main function needs to initialize many objects
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: leaderElectionID,
Metrics: server.Options{BindAddress: metricsAddr},
WebhookServer: webhook.NewServer(webhook.Options{Port: metricsPort}),
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
@@ -128,6 +124,9 @@ func main() { //nolint:funlen // main function needs to initialize many objects
// if you are doing or is intended to do any operation such as perform cleanups
// after the manager stops then its usage might be unsafe.
// LeaderElectionReleaseOnCancel: true,
Metrics: server.Options{BindAddress: metricsAddr},
WebhookServer: webhook.NewServer(webhook.Options{Port: metricsPort}),
NewCache: controllercache.New,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
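
Note (editorial, not part of the diff): controller-runtime builds the manager's cache by invoking whatever function is assigned to Options.NewCache, so the controllercache.New constructor added below has to satisfy cache.NewCacheFunc. A hypothetical compile-time assertion of that contract, which could live in a _test.go file anywhere in this module, is a one-liner:

package cache_test

import (
	controllercache "github.com/kyma-project/nats-manager/internal/controller/cache"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// Hypothetical assertion (not in the commit): New must match cache.NewCacheFunc,
// the type that manager.Options.NewCache expects.
var _ cache.NewCacheFunc = controllercache.New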
44 changes: 44 additions & 0 deletions internal/controller/cache/cache.go
@@ -0,0 +1,44 @@
package cache

import (
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"

natslabels "github.com/kyma-project/nats-manager/pkg/labels"
)

// New returns a cache with the cache options applied, generated from the rest config.
func New(config *rest.Config, options cache.Options) (cache.Cache, error) {
return cache.New(config, applySelectors(options))
}

func applySelectors(options cache.Options) cache.Options {
// The only objects we allow are the ones with the 'managed-by: nats-manager' label applied.
managedByNATS := fromLabelSelector(natslabels.SelectorManagedByNATS())

// Apply the label selector to all relevant objects.
options.ByObject = map[client.Object]cache.ByObject{
&appsv1.Deployment{}: managedByNATS,
&appsv1.StatefulSet{}: managedByNATS,
&corev1.ServiceAccount{}: managedByNATS,
&corev1.Secret{}: managedByNATS,
&corev1.Service{}: managedByNATS,
&corev1.ConfigMap{}: managedByNATS,
&rbacv1.ClusterRole{}: managedByNATS,
&rbacv1.ClusterRoleBinding{}: managedByNATS,
&autoscalingv1.HorizontalPodAutoscaler{}: managedByNATS,
&policyv1.PodDisruptionBudget{}: managedByNATS,
}
return options
}

func fromLabelSelector(selector labels.Selector) cache.ByObject {
return cache.ByObject{Label: selector}
}
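
For illustration (editorial, not part of the commit): with these selectors, reads served from the cache only ever see objects that carry the app.kubernetes.io/managed-by=nats-manager label. Below is a minimal sketch of using the constructor directly from within this module, assuming a reachable cluster via the local kubeconfig and a hypothetical kyma-system namespace:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"

	controllercache "github.com/kyma-project/nats-manager/internal/controller/cache"
)

// Editorial sketch, not part of the commit.
func main() {
	ctx := ctrl.SetupSignalHandler()

	// Build the label-scoped cache from the local kubeconfig.
	c, err := controllercache.New(ctrl.GetConfigOrDie(), cache.Options{})
	if err != nil {
		panic(err)
	}

	// Run the informers in the background; Start blocks until ctx is cancelled.
	go func() {
		if err := c.Start(ctx); err != nil {
			panic(err)
		}
	}()
	// Block until Start has run; informers created later sync on first use.
	c.WaitForCacheSync(ctx)

	// Only Services carrying the managed-by label were ever cached, so the
	// result is already filtered without passing a label selector here.
	// The namespace is hypothetical.
	svcs := &corev1.ServiceList{}
	if err := c.List(ctx, svcs, client.InNamespace("kyma-system")); err != nil {
		panic(err)
	}
	fmt.Printf("cached services: %d\n", len(svcs.Items))
}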
169 changes: 169 additions & 0 deletions internal/controller/cache/cache_test.go
@@ -0,0 +1,169 @@
package cache

import (
"fmt"
"reflect"
"testing"
"time"

natslabels "github.com/kyma-project/nats-manager/pkg/labels"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
)

func Test_applySelectors(t *testing.T) {
// given
syncPeriod := 30 * time.Second
selector := cache.ByObject{
Label: labels.SelectorFromSet(
map[string]string{
natslabels.KeyManagedBy: natslabels.ValueNATSManager,
},
),
}

type args struct {
options cache.Options
}
testCases := []struct {
name string
args args
want cache.Options
}{
{
name: "should apply the correct selectors",
args: args{
options: cache.Options{},
},
want: cache.Options{
ByObject: map[client.Object]cache.ByObject{
&appsv1.Deployment{}: selector,
&appsv1.StatefulSet{}: selector,
&corev1.ServiceAccount{}: selector,
&corev1.Secret{}: selector,
&corev1.Service{}: selector,
&corev1.ConfigMap{}: selector,
&rbacv1.ClusterRole{}: selector,
&rbacv1.ClusterRoleBinding{}: selector,
&autoscalingv1.HorizontalPodAutoscaler{}: selector,
&policyv1.PodDisruptionBudget{}: selector,
},
},
},
{
name: "should not remove existing options",
args: args{
options: cache.Options{
SyncPeriod: &syncPeriod,
},
},
want: cache.Options{
SyncPeriod: &syncPeriod,
ByObject: map[client.Object]cache.ByObject{
&appsv1.Deployment{}: selector,
&appsv1.StatefulSet{}: selector,
&corev1.ServiceAccount{}: selector,
&corev1.Secret{}: selector,
&corev1.Service{}: selector,
&corev1.ConfigMap{}: selector,
&rbacv1.ClusterRole{}: selector,
&rbacv1.ClusterRoleBinding{}: selector,
&autoscalingv1.HorizontalPodAutoscaler{}: selector,
&policyv1.PodDisruptionBudget{}: selector,
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// when
got := applySelectors(tc.args.options)

// then
require.True(t, deepEqualOptions(tc.want, got))
})
}
}

func deepEqualOptions(a, b cache.Options) bool {
// Compare only what the tests care about: the ByObject map and the SyncPeriod pointer.
o := deepEqualByObject(a.ByObject, b.ByObject)
s := a.SyncPeriod == b.SyncPeriod
return o && s
}

func deepEqualByObject(a, b map[client.Object]cache.ByObject) bool {
if len(a) != len(b) {
return false
}

aTypeMap := make(map[string]cache.ByObject, len(a))
bTypeMap := make(map[string]cache.ByObject, len(a))
computeTypeMap(a, aTypeMap)
computeTypeMap(b, bTypeMap)
return reflect.DeepEqual(aTypeMap, bTypeMap)
}

func computeTypeMap(byObjectMap map[client.Object]cache.ByObject, typeMap map[string]cache.ByObject) {
	// Key every entry by its concrete Go type so that maps keyed by different
	// (but equivalent) object pointers can be compared with reflect.DeepEqual.
	// This covers all registered object types, not just a subset.
	for obj, byObject := range byObjectMap {
		typeMap[fmt.Sprintf("%T", obj)] = byObject
	}
}

func Test_fromLabelSelector(t *testing.T) {
// given
type args struct {
label labels.Selector
}
tests := []struct {
name string
args args
want cache.ByObject
}{
{
name: "should return the correct selector",
args: args{
label: labels.SelectorFromSet(map[string]string{"key": "value"}),
},
want: cache.ByObject{
Label: labels.SelectorFromSet(map[string]string{"key": "value"}),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// when
got := fromLabelSelector(tt.args.label)

// then
require.Equal(t, tt.want, got)
})
}
}
26 changes: 26 additions & 0 deletions pkg/labels/labels.go
@@ -0,0 +1,26 @@
package labels

import (
"k8s.io/apimachinery/pkg/labels"
)

const (
// Kubernetes label keys used by nats-manager.
KeyComponent = "app.kubernetes.io/component"
KeyCreatedBy = "app.kubernetes.io/created-by"
KeyInstance = "app.kubernetes.io/instance"
KeyManagedBy = "app.kubernetes.io/managed-by"
KeyName = "app.kubernetes.io/name"
KeyPartOf = "app.kubernetes.io/part-of"
KeyDashboard = "kyma-project.io/dashboard"

// Kubernetes label values used by nats-manager.
ValueNATS = "nats"
ValueNATSManager = "nats-manager"
)

// SelectorManagedByNATS returns a label selector for the managed-by label
// ("app.kubernetes.io/managed-by") as used by the nats-manager.
func SelectorManagedByNATS() labels.Selector {
return labels.SelectorFromSet(map[string]string{KeyManagedBy: ValueNATSManager})
}
21 changes: 21 additions & 0 deletions pkg/labels/labels_test.go
@@ -0,0 +1,21 @@
package labels

import (
"reflect"
"testing"

"k8s.io/apimachinery/pkg/labels"
)

func TestSelectorManagedByNATS(t *testing.T) {
// arrange
wantedSelector := labels.SelectorFromSet(map[string]string{"app.kubernetes.io/managed-by": "nats-manager"})

// act
actualSelector := SelectorManagedByNATS()

// assert
if !reflect.DeepEqual(wantedSelector, actualSelector) {
t.Errorf("Expected %v, but got %v", wantedSelector, actualSelector)
}
}
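
A hypothetical testable example (editorial, not part of the commit) complementing the test above; it shows the selector's string form and how it evaluates concrete label sets:

package labels

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// ExampleSelectorManagedByNATS is a hypothetical example (not in the commit);
// it could live in pkg/labels/labels_test.go.
func ExampleSelectorManagedByNATS() {
	sel := SelectorManagedByNATS()

	fmt.Println(sel.String())
	fmt.Println(sel.Matches(labels.Set{KeyManagedBy: ValueNATSManager}))
	fmt.Println(sel.Matches(labels.Set{KeyManagedBy: "something-else"}))
	// Output:
	// app.kubernetes.io/managed-by=nats-manager
	// true
	// false
}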
