From cc78c94c97faecb1897c1129a312b1104373cce2 Mon Sep 17 00:00:00 2001 From: Mansur Uralov Date: Wed, 13 Sep 2023 09:44:36 +0200 Subject: [PATCH 1/3] Delete Cluster Scoped Resources * ClusterRole and ClusterRoleBinding were not deleted because the k8s garbage collector cannot collect cluster-scoped resources owned by a namespace-scoped resource * Add tests * Improve conditions --- api/v1alpha1/eventing_types.go | 15 ++- api/v1alpha1/status.go | 10 ++ api/v1alpha1/zz_generated.deepcopy.go | 2 +- internal/controller/eventing/controller.go | 23 +++- .../controller/integration_test.go | 66 ++++++----- internal/controller/eventing/status.go | 9 +- pkg/k8s/client.go | 29 +++++ pkg/k8s/client_test.go | 107 ++++++++++++++++++ pkg/k8s/mocks/client.go | 88 ++++++++++++++ test/utils/integration/integration.go | 31 ++++- 10 files changed, 340 insertions(+), 40 deletions(-) diff --git a/api/v1alpha1/eventing_types.go b/api/v1alpha1/eventing_types.go index 0fe27bb7..fd198ca6 100644 --- a/api/v1alpha1/eventing_types.go +++ b/api/v1alpha1/eventing_types.go @@ -39,6 +39,7 @@ const ( // common reasons ConditionReasonProcessing ConditionReason = "Processing" + ConditionReasonDeleted ConditionReason = "Deleted" // publisher proxy reasons ConditionReasonDeployed ConditionReason = "Deployed" @@ -49,18 +50,22 @@ const ( ConditionReasonForbidden ConditionReason = "Forbidden" ConditionReasonWebhookFailed ConditionReason = "WebhookFailed" ConditionReasonWebhookReady ConditionReason = "Ready" + ConditionReasonDeletedFailed ConditionReason = "DeletionFailed" // message for conditions - ConditionPublisherProxyReadyMessage = "Publisher proxy is deployed" - ConditionNATSAvailableMessage = "NATS is available" - ConditionWebhookReadyMessage = "Webhook is available" - ConditionPublisherProxyProcessingMessage = "Eventing publisher proxy deployment is in progress" - ConditionSubscriptionManagerReadyMessage = "Subscription manager is ready" + ConditionPublisherProxyReadyMessage = "Publisher proxy is deployed" + ConditionPublisherProxyDeletedMessage = "Publisher proxy is deleted" + ConditionNATSAvailableMessage = "NATS is available" + ConditionWebhookReadyMessage = "Webhook is available" + ConditionPublisherProxyProcessingMessage = "Eventing publisher proxy deployment is in progress" + ConditionSubscriptionManagerReadyMessage = "Subscription manager is ready" + ConditionSubscriptionManagerStoppedMessage = "Subscription manager is stopped" // subscription manager reasons ConditionReasonEventMeshSubManagerReady ConditionReason = "EventMeshSubscriptionManagerReady" ConditionReasonEventMeshSubManagerFailed ConditionReason = "EventMeshSubscriptionManagerFailed" ConditionReasonEventMeshSubManagerStopFailed ConditionReason = "EventMeshSubscriptionManagerStopFailed" + ConditionReasonEventMeshSubManagerStop ConditionReason = "Stopped" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
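Note on the new constants: the reason/message pairs above are what the deletion path writes into the Eventing CR status via the setters added in status.go below. A minimal sketch of the resulting condition, assuming the CR stores standard metav1.Condition entries under a type named "PublisherProxyReady" (the type name is an assumption, not part of this hunk):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // publisherProxyDeletedCondition sketches the condition the deletion path
    // should record; Reason and Message come from the constants above, the
    // Type value is assumed.
    func publisherProxyDeletedCondition() metav1.Condition {
        return metav1.Condition{
            Type:               "PublisherProxyReady", // assumed condition type name
            Status:             metav1.ConditionFalse,
            Reason:             "Deleted",                    // ConditionReasonDeleted
            Message:            "Publisher proxy is deleted", // ConditionPublisherProxyDeletedMessage
            LastTransitionTime: metav1.Now(),
        }
    }

Note that the string values, not the Go constant names, are what API consumers see; for example, ConditionReasonDeletedFailed surfaces as "DeletionFailed".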
diff --git a/api/v1alpha1/status.go b/api/v1alpha1/status.go index 45830178..0a859fee 100644 --- a/api/v1alpha1/status.go +++ b/api/v1alpha1/status.go @@ -71,6 +71,16 @@ func (es *EventingStatus) SetNATSAvailableConditionToTrue() { es.UpdateConditionNATSAvailable(metav1.ConditionTrue, ConditionReasonNATSAvailable, ConditionNATSAvailableMessage) } +func (es *EventingStatus) SetSubscriptionManagerConditionToFalse(reason ConditionReason, message string) { + es.UpdateConditionSubscriptionManagerReady(metav1.ConditionFalse, reason, + message) +} + +func (es *EventingStatus) SetPublisherProxyConditionToFalse(reason ConditionReason, message string) { + es.UpdateConditionPublisherProxyReady(metav1.ConditionFalse, reason, + message) +} + func (es *EventingStatus) SetPublisherProxyReadyToTrue() { es.State = StateReady es.UpdateConditionPublisherProxyReady(metav1.ConditionTrue, ConditionReasonDeployed, ConditionPublisherProxyReadyMessage) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 99b1cdc5..28204a61 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/internal/controller/eventing/controller.go b/internal/controller/eventing/controller.go index d5420406..c3cc8230 100644 --- a/internal/controller/eventing/controller.go +++ b/internal/controller/eventing/controller.go @@ -19,7 +19,6 @@ package eventing import ( "context" "fmt" - eventingv1alpha1 "github.com/kyma-project/eventing-manager/api/v1alpha1" "github.com/kyma-project/eventing-manager/pkg/env" "github.com/kyma-project/eventing-manager/pkg/eventing" @@ -234,10 +233,32 @@ func (r *Reconciler) handleEventingDeletion(ctx context.Context, eventing *event eventing, err, log) } } + eventing.Status.SetSubscriptionManagerConditionToFalse( + eventingv1alpha1.ConditionReasonEventMeshSubManagerStop, + eventingv1alpha1.ConditionSubscriptionManagerStoppedMessage) + + // delete cluster-scoped resources, such as clusterrole and clusterrolebinding. + if err := r.deleteClusterScopedResources(ctx, eventing); err != nil { + return ctrl.Result{}, r.syncStatusWithPublisherProxyErrWithReason(ctx, + eventingv1alpha1.ConditionReasonDeletedFailed, eventing, err, log) + } + eventing.Status.SetPublisherProxyConditionToFalse( + eventingv1alpha1.ConditionReasonDeleted, + eventingv1alpha1.ConditionPublisherProxyDeletedMessage) return r.removeFinalizer(ctx, eventing) } +// deleteClusterScopedResources deletes cluster-scoped resources, such as clusterrole and clusterrolebinding. 
+// K8s doesn't support cleaning cluster-scoped resources owned by namespace-scoped resources: +// https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/#owner-references-in-object-specifications +func (r *Reconciler) deleteClusterScopedResources(ctx context.Context, eventingCR *eventingv1alpha1.Eventing) error { + if err := r.kubeClient.DeleteClusterRole(ctx, eventing.GetPublisherClusterRoleName(*eventingCR), eventingCR.Namespace); err != nil { + return err + } + return r.kubeClient.DeleteClusterRoleBinding(ctx, eventing.GetPublisherClusterRoleBindingName(*eventingCR), eventingCR.Namespace) +} + func (r *Reconciler) handleEventingReconcile(ctx context.Context, eventing *eventingv1alpha1.Eventing, log *zap.SugaredLogger) (ctrl.Result, error) { log.Info("handling Eventing reconciliation...") diff --git a/internal/controller/eventing/integrationtests/controller/integration_test.go b/internal/controller/eventing/integrationtests/controller/integration_test.go index 50d1bc69..370c5aa1 100644 --- a/internal/controller/eventing/integrationtests/controller/integration_test.go +++ b/internal/controller/eventing/integrationtests/controller/integration_test.go @@ -150,6 +150,7 @@ func Test_CreateEventingCR_NATS(t *testing.T) { testEnvironment.EnsureDeploymentDeletion(t, eventing.GetPublisherDeploymentName(*tc.givenEventing), givenNamespace) } testEnvironment.EnsureK8sResourceDeleted(t, tc.givenNATS) + testEnvironment.EnsureNamespaceDeleted(t, givenNamespace) }() // then @@ -225,6 +226,7 @@ func Test_UpdateEventingCR(t *testing.T) { testEnvironment.EnsureDeploymentDeletion(t, givenEPPDeploymentName, givenNamespace) } testEnvironment.EnsureK8sResourceDeleted(t, nats) + testEnvironment.EnsureNamespaceDeleted(t, givenNamespace) }() // get Eventing CR. 
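To make the garbage-collector limitation behind deleteClusterScopedResources concrete: a cluster-scoped dependent may not name a namespaced owner, and the GC treats such an ownerReference as unresolvable (emitting an OwnerRefInvalidNamespace warning event) instead of collecting the object. A sketch of the invalid shape; the resource name and the Eventing group/version are illustrative, not taken from this patch:

    package example

    import (
        rbacv1 "k8s.io/api/rbac/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
    )

    // invalidOwnerRef builds a ClusterRole whose owner is a namespaced
    // Eventing CR; the garbage collector will never delete it, which is why
    // the controller removes it explicitly during finalization.
    func invalidOwnerRef(ownerUID types.UID) *rbacv1.ClusterRole {
        return &rbacv1.ClusterRole{
            ObjectMeta: metav1.ObjectMeta{
                Name: "eventing-publisher-proxy", // hypothetical name
                OwnerReferences: []metav1.OwnerReference{{
                    APIVersion: "operator.kyma-project.io/v1alpha1", // assumed group/version
                    Kind:       "Eventing",
                    Name:       "eventing", // a namespaced owner: invalid for a cluster-scoped dependent
                    UID:        ownerUID,
                }},
            },
        }
    }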
@@ -288,6 +290,7 @@ func Test_WatcherEventingCRK8sObjects(t *testing.T) { name string givenEventing *eventingv1alpha1.Eventing wantResourceDeletion []deletionFunc + runForRealCluster bool }{ { name: "should recreate Publish Service", @@ -344,29 +347,30 @@ func Test_WatcherEventingCRK8sObjects(t *testing.T) { deleteHPAFromK8s, }, }, - // @TODO: Fix the watching of ClusterRoles and ClusterRoleBindings - //{ - // name: "should recreate ClusterRole", - // givenEventing: utils.NewEventingCR( - // utils.WithEventingCRMinimal(), - // utils.WithEventingStreamData("Memory", "1M", "1M", 1, 1), - // utils.WithEventingPublisherData(1, 1, "199m", "99Mi", "399m", "199Mi"), - // ), - // wantResourceDeletion: []deletionFunc{ - // deleteClusterRoleFromK8s, - // }, - //}, - //{ - // name: "should recreate ClusterRoleBinding", - // givenEventing: utils.NewEventingCR( - // utils.WithEventingCRMinimal(), - // utils.WithEventingStreamData("Memory", "1M", "1M", 1, 1), - // utils.WithEventingPublisherData(1, 1, "199m", "99Mi", "399m", "199Mi"), - // ), - // wantResourceDeletion: []deletionFunc{ - // deleteClusterRoleBindingFromK8s, - // }, - //}, + { + name: "should recreate ClusterRole", + givenEventing: utils.NewEventingCR( + utils.WithEventingCRMinimal(), + utils.WithEventingStreamData("Memory", "1M", 1, 1), + utils.WithEventingPublisherData(1, 1, "199m", "99Mi", "399m", "199Mi"), + ), + wantResourceDeletion: []deletionFunc{ + deleteClusterRoleFromK8s, + }, + runForRealCluster: true, + }, + { + name: "should recreate ClusterRoleBinding", + givenEventing: utils.NewEventingCR( + utils.WithEventingCRMinimal(), + utils.WithEventingStreamData("Memory", "1M", 1, 1), + utils.WithEventingPublisherData(1, 1, "199m", "99Mi", "399m", "199Mi"), + ), + wantResourceDeletion: []deletionFunc{ + deleteClusterRoleBindingFromK8s, + }, + runForRealCluster: true, + }, { name: "should recreate all objects", givenEventing: utils.NewEventingCR( @@ -390,6 +394,10 @@ func Test_WatcherEventingCRK8sObjects(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() + if !*testEnvironment.EnvTestInstance.UseExistingCluster && tc.runForRealCluster { + t.Skip("Skipping test case as it can only be run on a real cluster") + } + // given g := gomega.NewWithT(t) eventingcontroller.IsDeploymentReady = func(deployment *v1.Deployment) bool { @@ -417,6 +425,7 @@ func Test_WatcherEventingCRK8sObjects(t *testing.T) { testEnvironment.EnsureDeploymentDeletion(t, eventing.GetPublisherDeploymentName(*tc.givenEventing), givenNamespace) } testEnvironment.EnsureK8sResourceDeleted(t, nats) + testEnvironment.EnsureNamespaceDeleted(t, givenNamespace) }() // check Eventing CR status.
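The skip above relies on envtest's UseExistingCluster flag. For context, a sketch of the assumed setup using controller-runtime's envtest package (USE_EXISTING_CLUSTER is the variable envtest itself honors):

    package example

    import (
        "os"

        "sigs.k8s.io/controller-runtime/pkg/envtest"
    )

    // newTestEnv sketches how UseExistingCluster is typically wired. When it
    // is false, tests run against a standalone kube-apiserver with no
    // controller-manager, so real-cluster behavior such as garbage collection
    // is unavailable and the cases above must be skipped.
    func newTestEnv() *envtest.Environment {
        useExisting := os.Getenv("USE_EXISTING_CLUSTER") == "true"
        return &envtest.Environment{
            UseExistingCluster: &useExisting,
        }
    }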
@@ -526,6 +535,7 @@ func Test_CreateEventingCR_EventMesh(t *testing.T) { if !*testEnvironment.EnvTestInstance.UseExistingCluster && !tc.shouldFailSubManager { testEnvironment.EnsureDeploymentDeletion(t, eventing.GetPublisherDeploymentName(*tc.givenEventing), givenNamespace) } + testEnvironment.EnsureNamespaceDeleted(t, givenNamespace) }() // then @@ -611,6 +621,7 @@ func Test_DeleteEventingCR(t *testing.T) { if tc.givenEventing.Spec.Backend.Type == eventingv1alpha1.NatsBackendType { testEnvironment.EnsureK8sResourceDeleted(t, nats) } + testEnvironment.EnsureNamespaceDeleted(t, givenNamespace) }() testEnvironment.EnsureDeploymentExists(t, eventing.GetPublisherDeploymentName(*tc.givenEventing), givenNamespace) @@ -630,15 +641,16 @@ func Test_DeleteEventingCR(t *testing.T) { eventing.GetPublisherHealthServiceName(*tc.givenEventing), givenNamespace) testEnvironment.EnsureK8sServiceAccountNotFound(t, eventing.GetPublisherServiceAccountName(*tc.givenEventing), givenNamespace) - testEnvironment.EnsureK8sClusterRoleNotFound(t, - eventing.GetPublisherClusterRoleName(*tc.givenEventing), givenNamespace) - testEnvironment.EnsureK8sClusterRoleBindingNotFound(t, - eventing.GetPublisherClusterRoleBindingName(*tc.givenEventing), givenNamespace) } else { // check if the owner reference is set. // if owner reference is set then these resources would be garbage collected in real k8s cluster. testEnvironment.EnsureEPPK8sResourcesHaveOwnerReference(t, *tc.givenEventing) } + // ensure the clusterrole and clusterrolebinding are deleted in both test modes. + testEnvironment.EnsureK8sClusterRoleNotFound(t, + eventing.GetPublisherClusterRoleName(*tc.givenEventing), givenNamespace) + testEnvironment.EnsureK8sClusterRoleBindingNotFound(t, + eventing.GetPublisherClusterRoleBindingName(*tc.givenEventing), givenNamespace) }) } } diff --git a/internal/controller/eventing/status.go b/internal/controller/eventing/status.go index 553db634..5fcdb0ff 100644 --- a/internal/controller/eventing/status.go +++ b/internal/controller/eventing/status.go @@ -37,10 +37,17 @@ func (r *Reconciler) syncStatusWithNATSErr(ctx context.Context, // syncStatusWithPublisherProxyErr updates Publisher Proxy condition and sets an error state. // Returns the relevant error.
func (r *Reconciler) syncStatusWithPublisherProxyErr(ctx context.Context, + eventing *eventingv1alpha1.Eventing, err error, log *zap.SugaredLogger) error { + return r.syncStatusWithPublisherProxyErrWithReason(ctx, eventingv1alpha1.ConditionReasonDeployedFailed, + eventing, err, log) +} + +func (r *Reconciler) syncStatusWithPublisherProxyErrWithReason(ctx context.Context, + reason eventingv1alpha1.ConditionReason, eventing *eventingv1alpha1.Eventing, err error, log *zap.SugaredLogger) error { // Set error state in status eventing.Status.SetStateError() - eventing.Status.UpdateConditionPublisherProxyReady(metav1.ConditionFalse, eventingv1alpha1.ConditionReasonDeployedFailed, + eventing.Status.UpdateConditionPublisherProxyReady(metav1.ConditionFalse, reason, err.Error()) return errors.Join(err, r.syncEventingStatus(ctx, eventing, log)) diff --git a/pkg/k8s/client.go b/pkg/k8s/client.go index 2930be30..144bc5e8 100644 --- a/pkg/k8s/client.go +++ b/pkg/k8s/client.go @@ -9,6 +9,7 @@ import ( admissionv1 "k8s.io/api/admissionregistration/v1" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" @@ -18,6 +19,8 @@ import ( type Client interface { GetDeployment(context.Context, string, string) (*v1.Deployment, error) DeleteDeployment(context.Context, string, string) error + DeleteClusterRole(context.Context, string, string) error + DeleteClusterRoleBinding(context.Context, string, string) error GetNATSResources(context.Context, string) (*natsv1alpha1.NATSList, error) PatchApply(context.Context, client.Object) error GetSecret(context.Context, string) (*corev1.Secret, error) @@ -60,6 +63,32 @@ func (c *KubeClient) DeleteDeployment(ctx context.Context, name, namespace strin return nil } +func (c *KubeClient) DeleteClusterRole(ctx context.Context, name, namespace string) error { + role := &rbac.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + if err := c.client.Delete(ctx, role); err != nil { + return client.IgnoreNotFound(err) + } + return nil +} + +func (c *KubeClient) DeleteClusterRoleBinding(ctx context.Context, name, namespace string) error { + binding := &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + if err := c.client.Delete(ctx, binding); err != nil { + return client.IgnoreNotFound(err) + } + return nil +} + func (c *KubeClient) GetNATSResources(ctx context.Context, namespace string) (*natsv1alpha1.NATSList, error) { natsList := &natsv1alpha1.NATSList{} err := c.client.List(ctx, natsList, &client.ListOptions{Namespace: namespace}) diff --git a/pkg/k8s/client_test.go b/pkg/k8s/client_test.go index 87f64739..38a36dd4 100644 --- a/pkg/k8s/client_test.go +++ b/pkg/k8s/client_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -158,6 +159,112 @@ func Test_DeleteDeployment(t *testing.T) { } } +func Test_DeleteClusterRole(t *testing.T) { + t.Parallel() + // Define test cases + testCases := []struct { + name string + noDeployment bool + }{ + { + name: "ClusterRole exists", + }, + { + name: "ClusterRole does not exist", + noDeployment: true, + }, + } + + // Run tests + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t 
*testing.T) { + t.Parallel() + // given + ctx := context.Background() + fakeClient := fake.NewClientBuilder().Build() + kubeClient := &KubeClient{ + client: fakeClient, + } + clusterRole := &rbac.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrole", + Namespace: "test-namespace", + }, + } + // Create the ClusterRole if it should exist + if !tc.noDeployment { + if err := fakeClient.Create(ctx, clusterRole); err != nil { + t.Fatalf("failed to create ClusterRole: %v", err) + } + } + + // when + err := kubeClient.DeleteClusterRole(ctx, clusterRole.Name, clusterRole.Namespace) + + // then + require.Nil(t, err) + // Check that the ClusterRole was deleted + err = fakeClient.Get(ctx, + types.NamespacedName{Name: clusterRole.Name, Namespace: clusterRole.Namespace}, &rbac.ClusterRole{}) + require.True(t, apierrors.IsNotFound(err), "DeleteClusterRole did not delete ClusterRole") + }) + } +} + +func Test_DeleteClusterRoleBinding(t *testing.T) { + t.Parallel() + // Define test cases + testCases := []struct { + name string + noDeployment bool + }{ + { + name: "ClusterRoleBinding exists", + }, + { + name: "ClusterRoleBinding does not exist", + noDeployment: true, + }, + } + + // Run tests + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + // given + ctx := context.Background() + fakeClient := fake.NewClientBuilder().Build() + kubeClient := &KubeClient{ + client: fakeClient, + } + clusterRoleBinding := &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrolebinding", + Namespace: "test-namespace", + }, + } + // Create the ClusterRoleBinding if it should exist + if !tc.noDeployment { + if err := fakeClient.Create(ctx, clusterRoleBinding); err != nil { + t.Fatalf("failed to create ClusterRoleBinding: %v", err) + } + } + + // when + err := kubeClient.DeleteClusterRoleBinding(ctx, clusterRoleBinding.Name, clusterRoleBinding.Namespace) + + // then + require.Nil(t, err) + // Check that the ClusterRoleBinding was deleted + err = fakeClient.Get(ctx, + types.NamespacedName{Name: clusterRoleBinding.Name, Namespace: clusterRoleBinding.Namespace}, &rbac.ClusterRoleBinding{}) + require.True(t, apierrors.IsNotFound(err), "DeleteClusterRoleBinding did not delete ClusterRoleBinding") + }) + } +} + func Test_GetSecret(t *testing.T) { t.Parallel() // Define test cases as a table.
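A side note on the two tests above: ClusterRole and ClusterRoleBinding are cluster-scoped kinds, so the Namespace set in their ObjectMeta carries no meaning and deletion is resolved by name alone. Reusing ctx and kubeClient from the tests, the following call (illustrative only) behaves identically:

    // For cluster-scoped kinds the namespace argument is effectively ignored,
    // so an empty string works the same as "test-namespace".
    err := kubeClient.DeleteClusterRole(ctx, "test-clusterrole", "")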
diff --git a/pkg/k8s/mocks/client.go b/pkg/k8s/mocks/client.go index 7af83585..48927c70 100644 --- a/pkg/k8s/mocks/client.go +++ b/pkg/k8s/mocks/client.go @@ -30,6 +30,94 @@ func (_m *Client) EXPECT() *Client_Expecter { return &Client_Expecter{mock: &_m.Mock} } +// DeleteClusterRole provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Client) DeleteClusterRole(_a0 context.Context, _a1 string, _a2 string) error { + ret := _m.Called(_a0, _a1, _a2) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Client_DeleteClusterRole_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteClusterRole' +type Client_DeleteClusterRole_Call struct { + *mock.Call +} + +// DeleteClusterRole is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 string +func (_e *Client_Expecter) DeleteClusterRole(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_DeleteClusterRole_Call { + return &Client_DeleteClusterRole_Call{Call: _e.mock.On("DeleteClusterRole", _a0, _a1, _a2)} +} + +func (_c *Client_DeleteClusterRole_Call) Run(run func(_a0 context.Context, _a1 string, _a2 string)) *Client_DeleteClusterRole_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *Client_DeleteClusterRole_Call) Return(_a0 error) *Client_DeleteClusterRole_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_DeleteClusterRole_Call) RunAndReturn(run func(context.Context, string, string) error) *Client_DeleteClusterRole_Call { + _c.Call.Return(run) + return _c +} + +// DeleteClusterRoleBinding provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Client) DeleteClusterRoleBinding(_a0 context.Context, _a1 string, _a2 string) error { + ret := _m.Called(_a0, _a1, _a2) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Client_DeleteClusterRoleBinding_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteClusterRoleBinding' +type Client_DeleteClusterRoleBinding_Call struct { + *mock.Call +} + +// DeleteClusterRoleBinding is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +// - _a2 string +func (_e *Client_Expecter) DeleteClusterRoleBinding(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Client_DeleteClusterRoleBinding_Call { + return &Client_DeleteClusterRoleBinding_Call{Call: _e.mock.On("DeleteClusterRoleBinding", _a0, _a1, _a2)} +} + +func (_c *Client_DeleteClusterRoleBinding_Call) Run(run func(_a0 context.Context, _a1 string, _a2 string)) *Client_DeleteClusterRoleBinding_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *Client_DeleteClusterRoleBinding_Call) Return(_a0 error) *Client_DeleteClusterRoleBinding_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_DeleteClusterRoleBinding_Call) RunAndReturn(run func(context.Context, string, string) error) *Client_DeleteClusterRoleBinding_Call { + _c.Call.Return(run) + return _c +} + // DeleteDeployment provides a mock function with given fields: _a0, _a1, _a2 func (_m *Client) DeleteDeployment(_a0 context.Context, _a1 string, _a2 string) 
error { ret := _m.Called(_a0, _a1, _a2) diff --git a/test/utils/integration/integration.go b/test/utils/integration/integration.go index 83775ad6..b2c95498 100644 --- a/test/utils/integration/integration.go +++ b/test/utils/integration/integration.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "crypto/rand" + "fmt" "log" "os" "path/filepath" @@ -326,9 +327,14 @@ func (env TestEnvironment) TearDown() error { env.TestCancelFn() } + // clean-up created resources + err := env.DeleteSecretFromK8s(getTestBackendConfig().WebhookSecretName, getTestBackendConfig().Namespace) + if err != nil { + fmt.Printf("couldn't clean the webhook secret: %s", err) + } + // retry to stop the api-server sleepTime := 1 * time.Second - var err error const retries = 20 for i := 0; i < retries; i++ { if err = env.EnvTestInstance.Stop(); err == nil { @@ -389,8 +395,6 @@ func (env TestEnvironment) EnsureEPPK8sResourcesHaveOwnerReference(t *testing.T, env.EnsureEPPMetricsServiceOwnerReferenceSet(t, eventingCR) env.EnsureEPPHealthServiceOwnerReferenceSet(t, eventingCR) env.EnsureEPPServiceAccountOwnerReferenceSet(t, eventingCR) - env.EnsureEPPClusterRoleOwnerReferenceSet(t, eventingCR) - env.EnsureEPPClusterRoleBindingOwnerReferenceSet(t, eventingCR) } func (env TestEnvironment) EnsureDeploymentExists(t *testing.T, name, namespace string) { @@ -418,14 +422,14 @@ func (env TestEnvironment) EnsureK8sClusterRoleExists(t *testing.T, name, namesp require.Eventually(t, func() bool { result, err := env.GetClusterRoleFromK8s(name, namespace) return err == nil && result != nil - }, SmallTimeOut, SmallPollingInterval, "failed to ensure existence of ClusterRole") + }, BigTimeOut, BigPollingInterval, "failed to ensure existence of ClusterRole") } func (env TestEnvironment) EnsureK8sClusterRoleBindingExists(t *testing.T, name, namespace string) { require.Eventually(t, func() bool { result, err := env.GetClusterRoleBindingFromK8s(name, namespace) return err == nil && result != nil - }, SmallTimeOut, SmallPollingInterval, "failed to ensure existence of ClusterRoleBinding") + }, BigTimeOut, BigPollingInterval, "failed to ensure existence of ClusterRoleBinding") } func (env TestEnvironment) EnsureHPAExists(t *testing.T, name, namespace string) { @@ -443,6 +447,14 @@ func (env TestEnvironment) EnsureK8sResourceDeleted(t *testing.T, obj client.Obj require.NoError(t, env.k8sClient.Delete(env.Context, obj)) } +func (env TestEnvironment) EnsureNamespaceDeleted(t *testing.T, namespace string) { + require.NoError(t, env.k8sClient.Delete(env.Context, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + })) +} + func (env TestEnvironment) EnsureDeploymentDeletion(t *testing.T, name, namespace string) { deployment := &v1.Deployment{ ObjectMeta: metav1.ObjectMeta{ @@ -906,6 +918,15 @@ func (env TestEnvironment) DeleteEventingFromK8s(name, namespace string) error { return env.k8sClient.Delete(env.Context, cr) } +func (env TestEnvironment) DeleteSecretFromK8s(name, namespace string) error { + return env.k8sClient.Delete(env.Context, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + }) +} + func (env TestEnvironment) GetDeploymentFromK8s(name, namespace string) (*v1.Deployment, error) { nn := types.NamespacedName{ Name: name, From 768165d1f27bd08fee2ae6e5eb04fd52a3420b2d Mon Sep 17 00:00:00 2001 From: Mansur Uralov Date: Thu, 14 Sep 2023 09:37:56 +0200 Subject: [PATCH 2/3] Improve code for Review Comments --- api/v1alpha1/eventing_types.go | 2 +- api/v1alpha1/status.go | 2 +- 
internal/controller/eventing/controller.go | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/api/v1alpha1/eventing_types.go b/api/v1alpha1/eventing_types.go index fd198ca6..3ecc16b1 100644 --- a/api/v1alpha1/eventing_types.go +++ b/api/v1alpha1/eventing_types.go @@ -65,7 +65,7 @@ const ( ConditionReasonEventMeshSubManagerReady ConditionReason = "EventMeshSubscriptionManagerReady" ConditionReasonEventMeshSubManagerFailed ConditionReason = "EventMeshSubscriptionManagerFailed" ConditionReasonEventMeshSubManagerStopFailed ConditionReason = "EventMeshSubscriptionManagerStopFailed" - ConditionReasonEventMeshSubManagerStop ConditionReason = "Stopped" + ConditionReasonEventMeshSubManagerStopped ConditionReason = "Stopped" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. diff --git a/api/v1alpha1/status.go b/api/v1alpha1/status.go index 0a859fee..71fc0287 100644 --- a/api/v1alpha1/status.go +++ b/api/v1alpha1/status.go @@ -71,7 +71,7 @@ func (es *EventingStatus) SetNATSAvailableConditionToTrue() { es.UpdateConditionNATSAvailable(metav1.ConditionTrue, ConditionReasonNATSAvailable, ConditionNATSAvailableMessage) } -func (es *EventingStatus) SetSubscriptionManagerConditionToFalse(reason ConditionReason, message string) { +func (es *EventingStatus) SetSubscriptionManagerReadyConditionToFalse(reason ConditionReason, message string) { es.UpdateConditionSubscriptionManagerReady(metav1.ConditionFalse, reason, message) } diff --git a/internal/controller/eventing/controller.go b/internal/controller/eventing/controller.go index c3cc8230..7256e803 100644 --- a/internal/controller/eventing/controller.go +++ b/internal/controller/eventing/controller.go @@ -19,6 +19,7 @@ package eventing import ( "context" "fmt" + eventingv1alpha1 "github.com/kyma-project/eventing-manager/api/v1alpha1" "github.com/kyma-project/eventing-manager/pkg/env" "github.com/kyma-project/eventing-manager/pkg/eventing" @@ -233,8 +234,8 @@ func (r *Reconciler) handleEventingDeletion(ctx context.Context, eventing *event eventing, err, log) } } - eventing.Status.SetSubscriptionManagerConditionToFalse( - eventingv1alpha1.ConditionReasonEventMeshSubManagerStop, + eventing.Status.SetSubscriptionManagerReadyConditionToFalse( + eventingv1alpha1.ConditionReasonEventMeshSubManagerStopped, eventingv1alpha1.ConditionSubscriptionManagerStoppedMessage) // delete cluster-scoped resources, such as clusterrole and clusterrolebinding. 
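The expecter-style mocks added in pkg/k8s/mocks earlier in this series can drive a unit test of the new deletion path. A minimal sketch, assuming the Reconciler can be constructed with the mocked k8s.Client (the constructor is not part of this patch):

    package eventing_test

    import (
        "testing"

        "github.com/stretchr/testify/mock"

        "github.com/kyma-project/eventing-manager/pkg/k8s/mocks"
    )

    // Sketch: expectations for the two cluster-scoped deletions performed by
    // deleteClusterScopedResources. Injecting the mock into the Reconciler is
    // assumed and not shown here.
    func Test_DeleteClusterScopedResources_Sketch(t *testing.T) {
        kubeClient := new(mocks.Client)
        kubeClient.EXPECT().DeleteClusterRole(mock.Anything, mock.Anything, mock.Anything).Return(nil)
        kubeClient.EXPECT().DeleteClusterRoleBinding(mock.Anything, mock.Anything, mock.Anything).Return(nil)

        // ... build a Reconciler with kubeClient, run the deletion handler,
        // then verify both calls happened:
        kubeClient.AssertExpectations(t)
    }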
From ef1428374383364df2fc7b9f2053382589caa08f Mon Sep 17 00:00:00 2001 From: Mansur Uralov Date: Thu, 14 Sep 2023 17:36:31 +0200 Subject: [PATCH 3/3] Rename Condition Reason --- api/v1alpha1/eventing_types.go | 2 +- internal/controller/eventing/controller.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/v1alpha1/eventing_types.go b/api/v1alpha1/eventing_types.go index 3ecc16b1..f2d61252 100644 --- a/api/v1alpha1/eventing_types.go +++ b/api/v1alpha1/eventing_types.go @@ -40,6 +40,7 @@ const ( // common reasons ConditionReasonProcessing ConditionReason = "Processing" ConditionReasonDeleted ConditionReason = "Deleted" + ConditionReasonStopped ConditionReason = "Stopped" // publisher proxy reasons ConditionReasonDeployed ConditionReason = "Deployed" @@ -65,7 +66,6 @@ const ( ConditionReasonEventMeshSubManagerReady ConditionReason = "EventMeshSubscriptionManagerReady" ConditionReasonEventMeshSubManagerFailed ConditionReason = "EventMeshSubscriptionManagerFailed" ConditionReasonEventMeshSubManagerStopFailed ConditionReason = "EventMeshSubscriptionManagerStopFailed" - ConditionReasonEventMeshSubManagerStopped ConditionReason = "Stopped" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. diff --git a/internal/controller/eventing/controller.go b/internal/controller/eventing/controller.go index 7256e803..273704c6 100644 --- a/internal/controller/eventing/controller.go +++ b/internal/controller/eventing/controller.go @@ -235,7 +235,7 @@ func (r *Reconciler) handleEventingDeletion(ctx context.Context, eventing *event } } eventing.Status.SetSubscriptionManagerReadyConditionToFalse( - eventingv1alpha1.ConditionReasonEventMeshSubManagerStopped, + eventingv1alpha1.ConditionReasonStopped, eventingv1alpha1.ConditionSubscriptionManagerStoppedMessage) // delete cluster-scoped resources, such as clusterrole and clusterrolebinding.
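Taken together, the three patches leave handleEventingDeletion with the following shape (a recap assembled from the hunks above, lightly elided, not a verbatim file excerpt):

    // Inside (r *Reconciler) handleEventingDeletion, after the subscription
    // manager has been stopped:
    eventing.Status.SetSubscriptionManagerReadyConditionToFalse(
        eventingv1alpha1.ConditionReasonStopped,
        eventingv1alpha1.ConditionSubscriptionManagerStoppedMessage)

    // Delete the cluster-scoped ClusterRole and ClusterRoleBinding, which the
    // garbage collector cannot clean up for a namespaced owner.
    if err := r.deleteClusterScopedResources(ctx, eventing); err != nil {
        return ctrl.Result{}, r.syncStatusWithPublisherProxyErrWithReason(ctx,
            eventingv1alpha1.ConditionReasonDeletedFailed, eventing, err, log)
    }
    eventing.Status.SetPublisherProxyConditionToFalse(
        eventingv1alpha1.ConditionReasonDeleted,
        eventingv1alpha1.ConditionPublisherProxyDeletedMessage)
    return r.removeFinalizer(ctx, eventing)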