diff --git a/README.md b/README.md
index 9f14f409..a3155acf 100644
--- a/README.md
+++ b/README.md
@@ -157,6 +157,7 @@ Example:
     "OPERATOR_IMAGE_TAG": "xxx"
   },
   "wca.enable": false,
-  "go.testTimeout": "600s"
+  "go.testTimeout": "600s",
+  "go.testFlags": ["-v"]
 }
 ```
\ No newline at end of file
diff --git a/ci/pipeline.yaml b/ci/pipeline.yaml
index cd1c64a6..79a90b69 100644
--- a/ci/pipeline.yaml
+++ b/ci/pipeline.yaml
@@ -1027,6 +1027,11 @@ jobs:
           # Create the YAML for installing the Agent Operator, which we want to package with the release
           make --silent IMG="icr.io/instana/instana-agent-operator:${OLM_RELEASE_VERSION}" controller-yaml > target/instana-agent-operator.yaml

+          echo "delivery.instana.io/rel-docker-agent-local/instana-agent-operator:${OLM_RELEASE_VERSION}" > target/images.txt
+          echo "icr.io/instana/instana-agent-operator:${OLM_RELEASE_VERSION}" >> target/images.txt
+          echo "icr.io/instana/instana-agent-operator:latest" >> target/images.txt
+          cat target/images.txt
+
           # For public releases, also create the appropriate github release:
           export RELEASE_REGEX='^v[0-9]+\.[0-9]+\.[0-9]+$'
           if ! [[ $VERSION =~ $RELEASE_REGEX ]]; then
diff --git a/ci/scripts/create-github-release.sh b/ci/scripts/create-github-release.sh
index d19fd797..77695049 100755
--- a/ci/scripts/create-github-release.sh
+++ b/ci/scripts/create-github-release.sh
@@ -57,3 +57,4 @@ upload_github_asset() {

 upload_github_asset "${TARGET_DIR}/${OPERATOR_RESOURCE_FILENAME}" "${OPERATOR_RESOURCE_FILENAME}"
 upload_github_asset "${TARGET_DIR}/olm-$VERSION.zip" "olm-$VERSION.zip"
+upload_github_asset "${TARGET_DIR}/images.txt" "images.txt"
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index ecf4ba8a..fcf15c49 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -74,6 +74,8 @@ rules:
   - events
   - namespaces
   - nodes
+  - nodes/metrics
+  - nodes/stats
   - persistentvolumeclaims
   - persistentvolumes
   - pods
diff --git a/controllers/instanaagent_controller.go b/controllers/instanaagent_controller.go
index f5ee6cf1..f6139c2c 100644
--- a/controllers/instanaagent_controller.go
+++ b/controllers/instanaagent_controller.go
@@ -160,7 +160,7 @@ func (r *InstanaAgentReconciler) reconcile(
 // adding role property required to manage instana-agent-k8sensor ClusterRole
 // +kubebuilder:rbac:urls=/version;/healthz,verbs=get
 // +kubebuilder:rbac:groups=extensions,resources=deployments;replicasets;ingresses,verbs=get;list;watch
-// +kubebuilder:rbac:groups=core,resources=configmaps;events;services;endpoints;namespaces;nodes;pods;pods/log;replicationcontrollers;resourcequotas;persistentvolumes;persistentvolumeclaims,verbs=get;list;watch
+// +kubebuilder:rbac:groups=core,resources=configmaps;events;services;endpoints;namespaces;nodes;pods;pods/log;replicationcontrollers;resourcequotas;persistentvolumes;persistentvolumeclaims;nodes/metrics;nodes/stats,verbs=get;list;watch
 // +kubebuilder:rbac:groups=apps,resources=daemonsets;deployments;replicasets;statefulsets,verbs=get;list;watch
 // +kubebuilder:rbac:groups=batch,resources=cronjobs;jobs,verbs=get;list;watch
 // +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch
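A quick way to confirm that the regenerated role actually grants the new node subresources is an impersonation check against a cluster where the updated RBAC is applied. This is only a sketch: the ServiceAccount name instana-agent-k8sensor and the instana-agent namespace are assumptions, adjust them to the names used in your deployment.

    # assumed ServiceAccount/namespace; both checks should answer "yes" once the updated role is bound
    kubectl auth can-i get nodes/stats --as=system:serviceaccount:instana-agent:instana-agent-k8sensor
    kubectl auth can-i get nodes/metrics --as=system:serviceaccount:instana-agent:instana-agent-k8sensor
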
diff --git a/e2e/agent_test_api.go b/e2e/agent_test_api.go
index e177680e..8a92b64c 100644
--- a/e2e/agent_test_api.go
+++ b/e2e/agent_test_api.go
@@ -44,6 +44,7 @@ import (
 // The namespace cannot be just deleted in all scenarios, as finalizers on the agent CR might block the namespace termination
 func EnsureAgentNamespaceDeletion() env.Func {
     return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
+        log.Info("==== Startup Cleanup, errors are expected if resources are not available ====")
         log.Infof("Ensure namespace %s is not present", cfg.Namespace())
         // Create a client to interact with the Kube API
         r, err := resources.New(cfg.Client().RESTConfig())
@@ -51,6 +52,34 @@ func EnsureAgentNamespaceDeletion() env.Func {
             return ctx, fmt.Errorf("failed to initialize client: %v", err)
         }

+        p := utils.RunCommand("kubectl get pods -n instana-agent")
+        log.Info("Current pods: ", p.Command(), p.ExitCode(), "\n", p.Result())
+
+        p = utils.RunCommand("kubectl get agent instana-agent -o yaml -n instana-agent")
+        log.Info("Current agent CR: ", p.Command(), p.ExitCode(), "\n", p.Result())
+
+        // Cleanup a potentially existing Agent CR first
+        if _, err = DeleteAgentCRIfPresent()(ctx, cfg); err != nil {
+            log.Info("Agent CR cleanup err: ", err)
+        }
+
+        log.Info("Agent CR cleanup completed")
+
+        // Just in case a helm chart install was present before from helm chart pipeline
+        p = utils.RunCommand("helm ls -n instana-agent")
+        log.Info("Current helm chart: ", p.Command(), p.ExitCode(), "\n", p.Result())
+
+        p = utils.RunCommand("helm uninstall instana-agent -n instana-agent")
+        if p.Err() != nil {
+            log.Warningf("Could not delete helm chart, might not be present? %s - %s - %s - %d", p.Command(), p.Err(), p.Out(), p.ExitCode())
+        }
+
+        // full purge of resources if anything would be left in the cluster
+        p = utils.RunCommand("kubectl delete crd/agents.instana.io clusterrole/instana-agent-k8sensor clusterrole/manager-role clusterrole/leader-election-role clusterrolebinding/leader-election-rolebinding clusterrolebinding/manager-rolebinding")
+        if p.Err() != nil {
+            log.Warningf("Could not remove some artifacts, ignoring as they might not be present %s - %s - %s - %d", p.Command(), p.Err(), p.Out(), p.ExitCode())
+        }
+
         // Check if namespace exist, otherwise just skip over it
         agentNamespace := &corev1.Namespace{}
         err = r.Get(ctx, InstanaNamespace, InstanaNamespace, agentNamespace)
@@ -58,16 +87,12 @@ func EnsureAgentNamespaceDeletion() env.Func {
             log.Infof("Namespace %s was not found, skipping deletion", cfg.Namespace())
             return ctx, nil
         }

+        // Something on the API request failed, this should fail the cleanup
         if err != nil {
             return ctx, fmt.Errorf("failed to get namespace: %v", err)
         }

-        // Cleanup a potentially existing Agent CR first
-        if _, err = DeleteAgentCRIfPresent()(ctx, cfg); err != nil {
-            return ctx, err
-        }
-
         // Delete the Namespace
         log.Info("Deleting namespace and waiting for successful termination")
         if err = r.Delete(ctx, agentNamespace); err != nil {
@@ -86,6 +111,7 @@ func EnsureAgentNamespaceDeletion() env.Func {
             return ctx, fmt.Errorf("error while waiting for namespace deletion: %v", err)
         }
         log.Infof("Namespace %s is gone", cfg.Namespace())
+        log.Info("==== Cleanup completed ====")
         return ctx, nil
     }
 }
diff --git a/pkg/k8s/object/builders/k8s-sensor/rbac/clusterrole.go b/pkg/k8s/object/builders/k8s-sensor/rbac/clusterrole.go
index 93e463f1..eaf1c42c 100644
--- a/pkg/k8s/object/builders/k8s-sensor/rbac/clusterrole.go
+++ b/pkg/k8s/object/builders/k8s-sensor/rbac/clusterrole.go
@@ -60,6 +60,8 @@ func (c *clusterRoleBuilder) Build() optional.Optional[client.Object] {
                 "endpoints",
                 "namespaces",
                 "nodes",
+                "nodes/metrics",
+                "nodes/stats",
                 "pods",
                 "pods/log",
                 "replicationcontrollers",
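To see the builder change end to end, the reconciled ClusterRole can be inspected in a test cluster. This is a sketch, not part of the change itself; the instana-agent-k8sensor name is taken from the purge command in the e2e cleanup above.

    # expect nodes, nodes/metrics and nodes/stats in the rule's resources list
    kubectl get clusterrole instana-agent-k8sensor -o yaml | grep -E 'nodes(/metrics|/stats)?$'
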
diff --git a/pkg/k8s/object/builders/k8s-sensor/rbac/clusterrole_test.go b/pkg/k8s/object/builders/k8s-sensor/rbac/clusterrole_test.go
index a2508f0f..f0694908 100644
--- a/pkg/k8s/object/builders/k8s-sensor/rbac/clusterrole_test.go
+++ b/pkg/k8s/object/builders/k8s-sensor/rbac/clusterrole_test.go
@@ -77,6 +77,8 @@ func TestClusterRoleBuilder_Build(t *testing.T) {
                     "endpoints",
                     "namespaces",
                     "nodes",
+                    "nodes/metrics",
+                    "nodes/stats",
                     "pods",
                     "pods/log",
                     "replicationcontrollers",
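The adjusted expectation can be exercised locally with the standard Go tooling; -v mirrors the go.testFlags setting added to the README snippet. Package path and test name are taken from the diff above.

    # run only the ClusterRole builder test, verbosely
    go test ./pkg/k8s/object/builders/k8s-sensor/rbac/... -run TestClusterRoleBuilder_Build -v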