From 675d00121f61c5314bc76f4a49db37db163395a2 Mon Sep 17 00:00:00 2001
From: Laurent Luce
Date: Mon, 9 Sep 2024 18:52:20 -0400
Subject: [PATCH 1/7] Support for tenant with no base cluster topology

---
 .../cluster_manager/cluster_manager.go    | 19 +++++++++++++++++++
 kardinal-manager/kardinal-manager/main.go |  1 -
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/kardinal-manager/kardinal-manager/cluster_manager/cluster_manager.go b/kardinal-manager/kardinal-manager/cluster_manager/cluster_manager.go
index 2c8022c0..b565c48e 100644
--- a/kardinal-manager/kardinal-manager/cluster_manager/cluster_manager.go
+++ b/kardinal-manager/kardinal-manager/cluster_manager/cluster_manager.go
@@ -284,6 +284,10 @@ func (manager *ClusterManager) CleanUpClusterResources(ctx context.Context, clus
     servicesByNS := lo.GroupBy(*clusterResources.Services, func(item corev1.Service) string {
         return item.Namespace
     })
+    if len(servicesByNS) == 0 {
+        // There are no resources to apply so we attempt to clear resources in the namespace set in the dummy gateway sent by the kontrol service
+        servicesByNS[clusterResources.Gateway.GetNamespace()] = []corev1.Service{}
+    }
     for namespace, services := range servicesByNS {
         if err := manager.cleanUpServicesInNamespace(ctx, namespace, services); err != nil {
             return stacktrace.Propagate(err, "An error occurred cleaning up services '%+v' in namespace '%s'", services, namespace)
@@ -292,6 +296,9 @@ func (manager *ClusterManager) CleanUpClusterResources(ctx context.Context, clus
     // Clean up deployments
     deploymentsByNS := lo.GroupBy(*clusterResources.Deployments, func(item appsv1.Deployment) string {
         return item.Namespace
     })
+    if len(deploymentsByNS) == 0 {
+        deploymentsByNS[clusterResources.Gateway.GetNamespace()] = []appsv1.Deployment{}
+    }
     for namespace, deployments := range deploymentsByNS {
         if err := manager.cleanUpDeploymentsInNamespace(ctx, namespace, deployments); err != nil {
             return stacktrace.Propagate(err, "An error occurred cleaning up deployments '%+v' in namespace '%s'", deployments, namespace)
@@ -300,6 +307,9 @@ func (manager *ClusterManager) CleanUpClusterResources(ctx context.Context, clus
     // Clean up virtual services
     virtualServicesByNS := lo.GroupBy(*clusterResources.VirtualServices, func(item v1alpha3.VirtualService) string {
         return item.Namespace
     })
+    if len(virtualServicesByNS) == 0 {
+        virtualServicesByNS[clusterResources.Gateway.GetNamespace()] = []v1alpha3.VirtualService{}
+    }
     for namespace, virtualServices := range virtualServicesByNS {
         if err := manager.cleanUpVirtualServicesInNamespace(ctx, namespace, virtualServices); err != nil {
             return stacktrace.Propagate(err, "An error occurred cleaning up virtual services '%+v' in namespace '%s'", virtualServices, namespace)
@@ -310,6 +320,9 @@ func (manager *ClusterManager) CleanUpClusterResources(ctx context.Context, clus
     destinationRulesByNS := lo.GroupBy(*clusterResources.DestinationRules, func(item v1alpha3.DestinationRule) string {
         return item.Namespace
     })
+    if len(destinationRulesByNS) == 0 {
+        destinationRulesByNS[clusterResources.Gateway.GetNamespace()] = []v1alpha3.DestinationRule{}
+    }
     for namespace, destinationRules := range destinationRulesByNS {
         if err := manager.cleanUpDestinationRulesInNamespace(ctx, namespace, destinationRules); err != nil {
             return stacktrace.Propagate(err, "An error occurred cleaning up destination rules '%+v' in namespace '%s'", destinationRules, namespace)
@@ -331,6 +344,9 @@ func (manager *ClusterManager) CleanUpClusterResources(ctx context.Context, clus
     envoyFiltersByNS := lo.GroupBy(*clusterResources.EnvoyFilters, func(item v1alpha3.EnvoyFilter) string {
         return item.Namespace
     })
+    if len(envoyFiltersByNS) == 0 {
+        envoyFiltersByNS[clusterResources.Gateway.GetNamespace()] = []v1alpha3.EnvoyFilter{}
+    }
     for namespace, envoyFilters := range envoyFiltersByNS {
         if err := manager.cleanupEnvoyFiltersInNamespace(ctx, namespace, envoyFilters); err != nil {
             return stacktrace.Propagate(err, "An error occurred cleaning up envoy filters '%+v' in namespace '%s'", envoyFilters, namespace)
@@ -343,6 +359,9 @@ func (manager *ClusterManager) CleanUpClusterResources(ctx context.Context, clus
     authorizationPoliciesByNS := lo.GroupBy(*clusterResources.AuthorizationPolicies, func(item securityv1beta1.AuthorizationPolicy) string {
         return item.Namespace
     })
+    if len(authorizationPoliciesByNS) == 0 {
+        authorizationPoliciesByNS[clusterResources.Gateway.GetNamespace()] = []securityv1beta1.AuthorizationPolicy{}
+    }
     for namespace, authorizationPolicies := range authorizationPoliciesByNS {
         if err := manager.cleanupAuthorizationPoliciesInNamespace(ctx, namespace, authorizationPolicies); err != nil {
             return stacktrace.Propagate(err, "An error occurred cleaning up authorization policies '%+v' in namespace '%s'", authorizationPolicies, namespace)
diff --git a/kardinal-manager/kardinal-manager/main.go b/kardinal-manager/kardinal-manager/main.go
index 5b351b04..af3f2974 100644
--- a/kardinal-manager/kardinal-manager/main.go
+++ b/kardinal-manager/kardinal-manager/main.go
@@ -13,7 +13,6 @@ import (
 const (
     successExitCode                = 0
     clusterConfigEndpointEnvVarKey = "KARDINAL_MANAGER_CLUSTER_CONFIG_ENDPOINT"
-    tenantUuidEnvVarKey            = "KARDINAL_MANAGER_TENANT_UUID"
 )
 
 func main() {

From 782080486fcc4fef028cd94f930f202436cbf52d Mon Sep 17 00:00:00 2001
From: Laurent Luce
Date: Mon, 9 Sep 2024 19:01:36 -0400
Subject: [PATCH 2/7] Cleanup

---
 .../kardinal-manager/cluster_manager/cluster_manager.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/kardinal-manager/kardinal-manager/cluster_manager/cluster_manager.go b/kardinal-manager/kardinal-manager/cluster_manager/cluster_manager.go
index b565c48e..bfa5d301 100644
--- a/kardinal-manager/kardinal-manager/cluster_manager/cluster_manager.go
+++ b/kardinal-manager/kardinal-manager/cluster_manager/cluster_manager.go
@@ -285,7 +285,9 @@ func (manager *ClusterManager) CleanUpClusterResources(ctx context.Context, clus
         return item.Namespace
     })
     if len(servicesByNS) == 0 {
-        // There are no resources to apply so we attempt to clear resources in the namespace set in the dummy gateway sent by the kontrol service
+        // There are no resources to apply so we attempt to clear resources in the namespace set in the dummy gateway
+        // sent by the kontrol service. This happens when the tenant has no base cluster topology; no initial deploy
+        // or the topologies have been deleted.
         servicesByNS[clusterResources.Gateway.GetNamespace()] = []corev1.Service{}
     }
     for namespace, services := range servicesByNS {

From 96c0bb9e4a72b6dab38cba01b37c808f96f62f5e Mon Sep 17 00:00:00 2001
From: Laurent Luce
Date: Tue, 10 Sep 2024 12:01:59 -0400
Subject: [PATCH 3/7] Add CI test

---
 .github/workflows/ci-e2e-tests.yml | 23 +++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/.github/workflows/ci-e2e-tests.yml b/.github/workflows/ci-e2e-tests.yml
index 951976bf..a24ae149 100644
--- a/.github/workflows/ci-e2e-tests.yml
+++ b/.github/workflows/ci-e2e-tests.yml
@@ -172,3 +172,26 @@ jobs:
       - name: Delete template
         run: |
           KARDINAL_CLI_DEV_MODE=TRUE /tmp/kardinal-cli template delete extra-item-shared
+
+      - name: Delete base topology and dev flows
+        run: |
+          KARDINAL_CLI_DEV_MODE=TRUE /tmp/kardinal-cli flow delete prod > kardinal.out
+          cat kardinal.out
+          if KARDINAL_CLI_DEV_MODE=TRUE kardinal flow ls | grep prod; then echo "Topologies not deleted"; exit 1; fi
+
+          # Check that the services have been terminated
+          while [ "$(kubectl get pods -n prod) != "No resources found in prod namespace" ]
+          do
+            echo "Waiting for the services to terminate..."
+            kubectl get pods -n prod
+            ((c++)) && ((c==12)) && exit 1
+            sleep 10
+          done
+          kubectl get pods -n prod
+
+          # Check for errors in the kardinal manager logs
+          if kubectl logs -n default -l dev.kardinal.app-id=kardinal-manager | grep "ERRO"
+          then
+            echo "Errors found in the kardinal manager"
+            kubectl logs -n default -l dev.kardinal.app-id=kardinal-manager | grep "ERRO"
+          fi

From a53a29315b548448e0947bf974f7617ebb34c800 Mon Sep 17 00:00:00 2001
From: Laurent Luce
Date: Tue, 10 Sep 2024 13:19:41 -0400
Subject: [PATCH 4/7] Fix call to cli

---
 .github/workflows/ci-e2e-tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci-e2e-tests.yml b/.github/workflows/ci-e2e-tests.yml
index a24ae149..2c130b38 100644
--- a/.github/workflows/ci-e2e-tests.yml
+++ b/.github/workflows/ci-e2e-tests.yml
@@ -177,7 +177,7 @@ jobs:
         run: |
           KARDINAL_CLI_DEV_MODE=TRUE /tmp/kardinal-cli flow delete prod > kardinal.out
           cat kardinal.out
-          if KARDINAL_CLI_DEV_MODE=TRUE kardinal flow ls | grep prod; then echo "Topologies not deleted"; exit 1; fi
+          if KARDINAL_CLI_DEV_MODE=TRUE /tmp/kardinal-cli flow ls | grep prod; then echo "Topologies not deleted"; exit 1; fi
 
           # Check that the services have been terminated
           while [ "$(kubectl get pods -n prod) != "No resources found in prod namespace" ]

From 448b3ee10298344de245108416935e31b882ff43 Mon Sep 17 00:00:00 2001
From: Laurent Luce
Date: Tue, 10 Sep 2024 19:30:16 -0400
Subject: [PATCH 5/7] Typo

---
 .github/workflows/ci-e2e-tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci-e2e-tests.yml b/.github/workflows/ci-e2e-tests.yml
index 2c130b38..2d55a327 100644
--- a/.github/workflows/ci-e2e-tests.yml
+++ b/.github/workflows/ci-e2e-tests.yml
@@ -180,7 +180,7 @@ jobs:
           if KARDINAL_CLI_DEV_MODE=TRUE /tmp/kardinal-cli flow ls | grep prod; then echo "Topologies not deleted"; exit 1; fi
 
           # Check that the services have been terminated
-          while [ "$(kubectl get pods -n prod) != "No resources found in prod namespace" ]
+          while [ "$(kubectl get pods -n prod)" != "No resources found in prod namespace" ]
           do
             echo "Waiting for the services to terminate..."
             kubectl get pods -n prod

From 9b01822e0c70d5f340470234a4312006a12e211e Mon Sep 17 00:00:00 2001
From: Laurent Luce
Date: Tue, 10 Sep 2024 19:44:43 -0400
Subject: [PATCH 6/7] Typo

---
 .github/workflows/ci-e2e-tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci-e2e-tests.yml b/.github/workflows/ci-e2e-tests.yml
index 2d55a327..3959a6df 100644
--- a/.github/workflows/ci-e2e-tests.yml
+++ b/.github/workflows/ci-e2e-tests.yml
@@ -180,7 +180,7 @@ jobs:
           if KARDINAL_CLI_DEV_MODE=TRUE /tmp/kardinal-cli flow ls | grep prod; then echo "Topologies not deleted"; exit 1; fi
 
           # Check that the services have been terminated
-          while [ "$(kubectl get pods -n prod)" != "No resources found in prod namespace" ]
+          while [ "$(kubectl get pods -n prod)" != "No resources found in prod namespace." ]
           do
             echo "Waiting for the services to terminate..."
             kubectl get pods -n prod

From 1342a08f607ebf6935b7fa6ad2b36380faa68914 Mon Sep 17 00:00:00 2001
From: Laurent Luce
Date: Tue, 10 Sep 2024 19:55:42 -0400
Subject: [PATCH 7/7] Compare stderr to know if there are no more pods in the prod namespace

---
 .github/workflows/ci-e2e-tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci-e2e-tests.yml b/.github/workflows/ci-e2e-tests.yml
index 3959a6df..ebc60b64 100644
--- a/.github/workflows/ci-e2e-tests.yml
+++ b/.github/workflows/ci-e2e-tests.yml
@@ -180,7 +180,7 @@ jobs:
           if KARDINAL_CLI_DEV_MODE=TRUE /tmp/kardinal-cli flow ls | grep prod; then echo "Topologies not deleted"; exit 1; fi
 
           # Check that the services have been terminated
-          while [ "$(kubectl get pods -n prod)" != "No resources found in prod namespace." ]
+          while [ "$(kubectl get pods -n prod 2>&1 >/dev/null)" != "No resources found in prod namespace." ]
           do
             echo "Waiting for the services to terminate..."
             kubectl get pods -n prod
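
A note on the stderr comparison introduced in PATCH 7/7: kubectl prints pod rows on stdout but emits the "No resources found in prod namespace." message on stderr, so the redirection "2>&1 >/dev/null" first duplicates stderr onto the captured stream and then discards stdout, leaving only that message in the command substitution. A minimal standalone sketch of the resulting wait loop, reusing the prod namespace and the 12-attempt limit from the workflow step above:

    # Wait until the prod namespace has no pods left, giving up after 12 attempts (~2 minutes).
    # The command substitution captures only stderr, because stdout is sent to /dev/null
    # after stderr has been duplicated onto the captured stream.
    c=0
    while [ "$(kubectl get pods -n prod 2>&1 >/dev/null)" != "No resources found in prod namespace." ]
    do
      echo "Waiting for the services to terminate..."
      kubectl get pods -n prod
      ((c++)) && ((c==12)) && exit 1
      sleep 10
    done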