diff --git a/testing/integration/kubernetes_agent_standalone_test.go b/testing/integration/kubernetes_agent_standalone_test.go
index 148706f3654..f3158b29644 100644
--- a/testing/integration/kubernetes_agent_standalone_test.go
+++ b/testing/integration/kubernetes_agent_standalone_test.go
@@ -373,6 +373,94 @@ func TestKubernetesAgentHelm(t *testing.T) {
 				k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
 			},
 		},
+		{
+			name: "helm standalone agent unprivileged kubernetes hints",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{
+					"agent": map[string]any{
+						// NOTE: Setting the version to something released is mandatory as when we enable hints
+						// we have an init container that downloads a released agent archive and extracts
+						// the templates from there. If and when we embed the templates directly in the
+						// agent image, we can remove this.
+						"version":      "8.16.0",
+						"unprivileged": true,
+						"image": map[string]any{
+							"repository": kCtx.agentImageRepo,
+							"tag":        kCtx.agentImageTag,
+							"pullPolicy": "Never",
+						},
+					},
+					"kubernetes": map[string]any{
+						"enabled": true,
+						"hints": map[string]any{
+							"enabled": true,
+						},
+					},
+					"outputs": map[string]any{
+						"default": map[string]any{
+							"type":    "ESPlainAuthAPI",
+							"url":     kCtx.esHost,
+							"api_key": kCtx.esAPIKey,
+						},
+					},
+				}),
+				k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil),
+				k8sStepCheckAgentStatus("name=agent-clusterwide-helm-agent", 1, "agent", nil),
+				k8sStepCheckAgentStatus("name=agent-ksmsharded-helm-agent", 1, "agent", nil),
+				k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
+				k8sStepRunInnerTests("name=agent-clusterwide-helm-agent", 1, "agent"),
+				k8sStepRunInnerTests("name=agent-ksmsharded-helm-agent", 1, "agent"),
+				k8sStepHintsRedisCreate(),
+				k8sStepHintsRedisCheckAgentStatus("name=agent-pernode-helm-agent", true),
+				k8sStepHintsRedisDelete(),
+				k8sStepHintsRedisCheckAgentStatus("name=agent-pernode-helm-agent", false),
+			},
+		},
+		{
+			name: "helm standalone agent unprivileged kubernetes hints pre-deployed",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepHintsRedisCreate(),
+				k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{
+					"agent": map[string]any{
+						// NOTE: Setting the version to something released is mandatory as when we enable hints
+						// we have an init container that downloads a released agent archive and extracts
+						// the templates from there. If and when we embed the templates directly in the
+						// agent image, we can remove this.
+						"version":      "8.16.0",
+						"unprivileged": true,
+						"image": map[string]any{
+							"repository": kCtx.agentImageRepo,
+							"tag":        kCtx.agentImageTag,
+							"pullPolicy": "Never",
+						},
+					},
+					"kubernetes": map[string]any{
+						"enabled": true,
+						"hints": map[string]any{
+							"enabled": true,
+						},
+					},
+					"outputs": map[string]any{
+						"default": map[string]any{
+							"type":    "ESPlainAuthAPI",
+							"url":     kCtx.esHost,
+							"api_key": kCtx.esAPIKey,
+						},
+					},
+				}),
+				k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil),
+				k8sStepCheckAgentStatus("name=agent-clusterwide-helm-agent", 1, "agent", nil),
+				k8sStepCheckAgentStatus("name=agent-ksmsharded-helm-agent", 1, "agent", nil),
+				k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
+				k8sStepRunInnerTests("name=agent-clusterwide-helm-agent", 1, "agent"),
+				k8sStepRunInnerTests("name=agent-ksmsharded-helm-agent", 1, "agent"),
+				k8sStepHintsRedisCheckAgentStatus("name=agent-pernode-helm-agent", true),
+				k8sStepHintsRedisDelete(),
+				k8sStepHintsRedisCheckAgentStatus("name=agent-pernode-helm-agent", false),
+			},
+		},
 	}
 
 	for _, tc := range testCases {
@@ -1164,3 +1252,86 @@ func k8sStepHelmDeploy(chartPath string, releaseName string, values map[string]a
 		require.NoError(t, err, "failed to install helm chart")
 	}
 }
+
+func k8sStepHintsRedisCreate() k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		r, err := os.Open("testdata/k8s.hints.redis.yaml")
+		require.NoError(t, err, "failed to open redis k8s test data")
+
+		redisObjs, err := k8sYAMLToObjects(bufio.NewReader(r))
+		require.NoError(t, err, "failed to convert redis yaml to k8s objects")
+
+		t.Cleanup(func() {
+			err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, redisObjs...)
+			require.NoError(t, err, "failed to delete redis k8s objects")
+		})
+
+		err = k8sCreateObjects(ctx, kCtx.client, k8sCreateOpts{wait: true, waitTimeout: 120 * time.Second, namespace: namespace}, redisObjs...)
+		require.NoError(t, err, "failed to create redis k8s objects")
+	}
+}
+
+func k8sStepHintsRedisCheckAgentStatus(agentPodLabelSelector string, hintDeployed bool) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		agentPodList := &corev1.PodList{}
+		err := kCtx.client.Resources(namespace).List(ctx, agentPodList, func(opt *metav1.ListOptions) {
+			opt.LabelSelector = agentPodLabelSelector
+		})
+		require.NoError(t, err, "failed to list agent pods with selector ", agentPodLabelSelector)
+		require.NotEmpty(t, agentPodList.Items, "no agent pods found with selector ", agentPodLabelSelector)
+
+		redisPodSelector := "app.kubernetes.io/name=redis"
+		redisPodList := &corev1.PodList{}
+		err = kCtx.client.Resources(namespace).List(ctx, redisPodList, func(opt *metav1.ListOptions) {
+			opt.LabelSelector = redisPodSelector
+		})
+		require.NoError(t, err, "failed to list redis pods with selector ", redisPodSelector)
+		if hintDeployed {
+			require.NotEmpty(t, redisPodList.Items, "no redis pods found with selector ", redisPodSelector)
+			// check that redis pods have the correct annotations
+			for _, redisPod := range redisPodList.Items {
+				hintPackage, ok := redisPod.ObjectMeta.Annotations["co.elastic.hints/package"]
+				require.True(t, ok, "missing hints annotation")
+				require.Equal(t, "redis", hintPackage, "hints annotation package wrong value")
+			}
+		} else {
+			require.Empty(t, redisPodList.Items, "redis pods should not exist ", redisPodSelector)
+		}
+
+		for _, pod := range agentPodList.Items {
+			shouldExist := hintDeployed
+			if shouldExist {
+				redisPodOnSameNode := false
+				for _, redisPod := range redisPodList.Items {
+					redisPodOnSameNode = redisPod.Spec.NodeName == pod.Spec.NodeName
+					if redisPodOnSameNode {
+						break
+					}
+				}
+				shouldExist = shouldExist && redisPodOnSameNode
+			}
+
+			var stdout, stderr bytes.Buffer
+			err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, namespace, pod.Name, "agent", map[string]bool{
+				"redis/metrics": shouldExist,
+			})
+			if err != nil {
+				t.Errorf("failed to check agent status %s: %v", pod.Name, err)
+				t.Logf("stdout: %s\n", stdout.String())
+				t.Logf("stderr: %s\n", stderr.String())
+				t.FailNow()
+			}
+		}
+	}
+}
+
+func k8sStepHintsRedisDelete() k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		redisPod := &corev1.Pod{}
+		err := kCtx.client.Resources(namespace).Get(ctx, "redis", namespace, redisPod)
+		require.NoError(t, err, "failed to get redis pod")
+
+		err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, redisPod)
+		require.NoError(t, err, "failed to delete redis k8s objects")
+	}
+}
diff --git a/testing/integration/testdata/k8s.hints.redis.yaml b/testing/integration/testdata/k8s.hints.redis.yaml
new file mode 100644
index 00000000000..c21f7421d46
--- /dev/null
+++ b/testing/integration/testdata/k8s.hints.redis.yaml
@@ -0,0 +1,64 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: redis
+  annotations:
+    co.elastic.hints/package: redis # don't remove this as integration tests depend on it
+    co.elastic.hints/data_streams: info
+    co.elastic.hints/host: '${kubernetes.pod.ip}:6379'
+    co.elastic.hints/info.period: 5s
+  labels:
+    k8s-app: redis
+    app: redis
+    app.kubernetes.io/name: redis # don't change this as integration tests depend on it
+spec:
+  containers:
+    - name: redis
+      image: redis:5.0.4
+      command:
+        - redis-server
+        - "/redis-master/redis.conf"
+      env:
+        - name: MASTER
+          value: "true"
+      ports:
+        - containerPort: 6379
+      resources:
+        limits:
+          cpu: "0.1"
+      volumeMounts:
+        - mountPath: /redis-master-data
+          name: data
+        - mountPath: /redis-master
+          name: config
+  volumes:
+    - name: data
+      emptyDir: {}
+    - name: config
+      configMap:
+        name: example-redis-config
+        items:
+          - key: redis-config
+            path: redis.conf
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-redis-config
+data:
+  redis-config: |
+    maxmemory 2mb
+    maxmemory-policy allkeys-lru
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: redis
+spec:
+  type: ClusterIP
+  ports:
+    - port: 6379
+      targetPort: 6379
+      name: client
+  selector:
+    app: redis