Skip to content

Commit

Permalink
enhancement(5832): updated k8s test, refactored upgrade test into test step
Browse files Browse the repository at this point in the history
  • Loading branch information
kaanyalti committed Dec 20, 2024
1 parent 7687fa7 commit aec6441
Showing 1 changed file with 22 additions and 209 deletions.
231 changes: 22 additions & 209 deletions testing/integration/kubernetes_agent_standalone_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -289,6 +289,7 @@ func TestKubernetesAgentHelm(t *testing.T) {
k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil),
k8sStepCheckAgentStatus("name=agent-clusterwide-helm-agent", 1, "agent", nil),
k8sStepCheckAgentStatus("name=agent-ksmsharded-helm-agent", 1, "agent", nil),
k8sStepCheckRestrictUpgrade("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
k8sStepRunInnerTests("name=agent-clusterwide-helm-agent", 1, "agent"),
k8sStepRunInnerTests("name=agent-ksmsharded-helm-agent", 1, "agent"),
Expand Down Expand Up @@ -321,6 +322,7 @@ func TestKubernetesAgentHelm(t *testing.T) {
k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil),
k8sStepCheckAgentStatus("name=agent-clusterwide-helm-agent", 1, "agent", nil),
k8sStepCheckAgentStatus("name=agent-ksmsharded-helm-agent", 1, "agent", nil),
k8sStepCheckRestrictUpgrade("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
k8sStepRunInnerTests("name=agent-clusterwide-helm-agent", 1, "agent"),
k8sStepRunInnerTests("name=agent-ksmsharded-helm-agent", 1, "agent"),
Expand Down Expand Up @@ -480,215 +482,6 @@ func TestKubernetesAgentHelm(t *testing.T) {
}
}

// TestRestrictCliUpgrade verifies that a standalone elastic-agent deployed in
// Kubernetes via the Helm chart rejects the `elastic-agent upgrade` CLI
// command. Container-deployed agents are upgraded by replacing the image, not
// through the self-upgrade mechanism, so the command must fail with
// coordinator.ErrNotUpgradable on stderr.
func TestRestrictCliUpgrade(t *testing.T) {
	info := define.Require(t, define.Requirements{
		Stack: &define.Stack{},
		Local: false,
		Sudo:  false,
		OS: []define.OS{
			{Type: define.Kubernetes, DockerVariant: "basic"},
			{Type: define.Kubernetes, DockerVariant: "wolfi"},
			{Type: define.Kubernetes, DockerVariant: "ubi"},
			{Type: define.Kubernetes, DockerVariant: "complete"},
			{Type: define.Kubernetes, DockerVariant: "complete-wolfi"},
		},
		Group: define.Kubernetes,
	})

	// AGENT_IMAGE is expected in the form "<repository>:<version>".
	agentImage := os.Getenv("AGENT_IMAGE")
	require.NotEmpty(t, agentImage, "AGENT_IMAGE must be set")

	agentImageParts := strings.SplitN(agentImage, ":", 2)
	require.Len(t, agentImageParts, 2, "AGENT_IMAGE must be in the form '<repository>:<version>'")
	agentImageRepo := agentImageParts[0]
	agentImageTag := agentImageParts[1]

	client, err := info.KubeClient()
	require.NoError(t, err)
	require.NotNil(t, client)

	testLogsBasePath := os.Getenv("K8S_TESTS_POD_LOGS_BASE")
	require.NotEmpty(t, testLogsBasePath, "K8S_TESTS_POD_LOGS_BASE must be set")

	err = os.MkdirAll(filepath.Join(testLogsBasePath, t.Name()), 0755)
	require.NoError(t, err, "failed to create test logs directory")

	namespace := info.Namespace

	esHost := os.Getenv("ELASTICSEARCH_HOST")
	require.NotEmpty(t, esHost, "ELASTICSEARCH_HOST must be set")

	esAPIKey, err := generateESAPIKey(info.ESClient, namespace)
	require.NoError(t, err, "failed to generate ES API key")
	require.NotEmpty(t, esAPIKey, "failed to generate ES API key")

	testCases := []struct {
		name   string
		values map[string]any
		// Minimum number of agent containers that must be exercised:
		// perNode DaemonSet, clusterWide Deployment, ksmSharded StatefulSet.
		atLeastValidatedPodsNumber int
	}{
		{
			name: "helm standalone agent default kubernetes privileged",
			values: map[string]any{
				"kubernetes": map[string]any{
					"enabled": true,
				},
				"agent": map[string]any{
					"unprivileged": false,
					"image": map[string]any{
						"repository": agentImageRepo,
						"tag":        agentImageTag,
						"pullPolicy": "Never",
					},
				},
				"outputs": map[string]any{
					"default": map[string]any{
						"type":    "ESPlainAuthAPI",
						"url":     esHost,
						"api_key": esAPIKey,
					},
				},
			},
			atLeastValidatedPodsNumber: 3,
		},
		{
			name: "helm standalone agent default kubernetes unprivileged",
			values: map[string]any{
				"kubernetes": map[string]any{
					"enabled": true,
				},
				"agent": map[string]any{
					"unprivileged": true,
					"image": map[string]any{
						"repository": agentImageRepo,
						"tag":        agentImageTag,
						"pullPolicy": "Never",
					},
				},
				"outputs": map[string]any{
					"default": map[string]any{
						"type":    "ESPlainAuthAPI",
						"url":     esHost,
						"api_key": esAPIKey,
					},
				},
			},
			atLeastValidatedPodsNumber: 3,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()

			// Derive a deterministic, DNS-safe namespace from the case name.
			hasher := sha256.New()
			hasher.Write([]byte(tc.name))
			testNamespace := strings.ToLower(base64.URLEncoding.EncodeToString(hasher.Sum(nil)))
			testNamespace = noSpecialCharsRegexp.ReplaceAllString(testNamespace, "")

			settings := cli.New()
			settings.SetNamespace(testNamespace)
			actionConfig := &action.Configuration{}

			helmChart, err := loader.Load(agentK8SHelm)
			require.NoError(t, err, "failed to load helm chart")

			// Discard helm's debug logging output.
			err = actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), "",
				func(format string, v ...interface{}) {})
			require.NoError(t, err, "failed to init helm action config")

			helmValues := tc.values

			t.Cleanup(func() {
				// Keep pod logs around for post-mortem on failure.
				if t.Failed() {
					k8sDumpAllPodLogs(ctx, client, testNamespace, testNamespace, testLogsBasePath)
				}

				uninstallAction := action.NewUninstall(actionConfig)
				uninstallAction.Wait = true
				_, err := uninstallAction.Run("helm-agent")
				require.NoError(t, err, "failed to uninstall helm chart")
			})

			installAction := action.NewInstall(actionConfig)
			installAction.Namespace = testNamespace
			installAction.CreateNamespace = true
			installAction.UseReleaseName = true
			installAction.ReleaseName = "helm-agent"
			installAction.Timeout = 2 * time.Minute
			installAction.Wait = true
			installAction.WaitForJobs = true
			_, err = installAction.Run(helmChart, helmValues)
			require.NoError(t, err, "failed to install helm chart")

			podList := &corev1.PodList{}
			err = client.Resources(testNamespace).List(ctx, podList)
			require.NoError(t, err, "failed to list pods in namespace %s", testNamespace)

			checkedAgentContainers := 0

			for _, pod := range podList.Items {
				if !strings.HasPrefix(pod.GetName(), "agent-") {
					continue
				}

				// Wait up to 120 seconds for the agent in this pod to report healthy.
				statusCmd := []string{"elastic-agent", "status"}
				var stdout, stderr bytes.Buffer
				var agentHealthyErr error
				for i := 0; i < 120; i++ {
					stdout.Reset()
					stderr.Reset()
					agentHealthyErr = client.Resources().ExecInPod(ctx, testNamespace, pod.Name, "agent", statusCmd, &stdout, &stderr)
					if agentHealthyErr == nil {
						break
					}
					time.Sleep(time.Second)
				}

				if agentHealthyErr != nil {
					t.Logf("stdout: %s\n", stdout.String())
					t.Logf("stderr: %s\n", stderr.String())
					t.Fatalf("elastic-agent never reported healthy: %v", agentHealthyErr)
				}

				stdout.Reset()
				stderr.Reset()

				// The version argument is arbitrary: the command must be
				// rejected before any version handling happens.
				upgradeCmd := []string{"elastic-agent", "upgrade", "1.0.0"}
				upgradeCmdErr := client.Resources().ExecInPod(ctx, testNamespace, pod.Name, "agent", upgradeCmd, &stdout, &stderr)
				require.Error(t, upgradeCmdErr, "upgrade command should have failed")
				require.Contains(t, stderr.String(), coordinator.ErrNotUpgradable.Error())

				checkedAgentContainers++
			}

			require.GreaterOrEqual(t, checkedAgentContainers, tc.atLeastValidatedPodsNumber,
				"at least %d agent containers should be checked", tc.atLeastValidatedPodsNumber)
		})
	}
}

// k8sCheckAgentStatus checks that the agent reports healthy.
func k8sCheckAgentStatus(ctx context.Context, client klient.Client, stdout *bytes.Buffer, stderr *bytes.Buffer,
namespace string, agentPodName string, containerName string, componentPresence map[string]bool,
Expand Down Expand Up @@ -1545,3 +1338,23 @@ func k8sStepHintsRedisDelete() k8sTestStep {
require.NoError(t, err, "failed to delete redis k8s objects")
}
}

// k8sStepCheckRestrictUpgrade returns a test step that asserts the
// `elastic-agent upgrade` CLI command is rejected inside every agent pod
// matching agentPodLabelSelector. Container-deployed agents are not
// self-upgradable, so the command must fail and report
// coordinator.ErrNotUpgradable on stderr. The step also requires exactly
// expectedPodNumber matching pods to exist.
func k8sStepCheckRestrictUpgrade(agentPodLabelSelector string, expectedPodNumber int, containerName string) k8sTestStep {
	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
		podList := &corev1.PodList{}
		err := kCtx.client.Resources(namespace).List(ctx, podList, func(opt *metav1.ListOptions) {
			opt.LabelSelector = agentPodLabelSelector
		})
		require.NoError(t, err, "failed to list pods with selector %q", agentPodLabelSelector)
		require.NotEmpty(t, podList.Items, "no pods found with selector %q", agentPodLabelSelector)
		require.Len(t, podList.Items, expectedPodNumber, "unexpected number of pods found with selector %q", agentPodLabelSelector)

		for _, pod := range podList.Items {
			var stdout, stderr bytes.Buffer

			// The version argument is arbitrary: the command must be rejected
			// before any version handling happens.
			command := []string{"elastic-agent", "upgrade", "1.0.0"}
			err := kCtx.client.Resources().ExecInPod(ctx, namespace, pod.Name, containerName, command, &stdout, &stderr)
			require.Error(t, err, "upgrade command should have failed in pod %s", pod.Name)
			require.Contains(t, stderr.String(), coordinator.ErrNotUpgradable.Error())
		}
	}
}

0 comments on commit aec6441

Please sign in to comment.