From e803cd269c9c47f96958e0751a449db96a41255b Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 11:13:05 +0200 Subject: [PATCH 1/3] [8.x](backport #6293) chore: update version and appVersion in the elastic-agent helm chart (#6354) * chore: update version and appVersion in the elastic-agent helm chart (#6293) * chore: update version and appVersion in the elastic-agent helm chart * chore: render examples * fix: correct agentVersion * fix: mark elastic-agent helm chart with -beta suffix * fix: update chart version and render examples (cherry picked from commit df2786ffae0015cbec6770f61a49f30047ae7d46) * chore: update helm chart version --------- Co-authored-by: Panos Koutsovasilis --- deploy/helm/elastic-agent/Chart.yaml | 3 +- .../examples/eck/rendered/manifest.yaml | 30 ++-- .../fleet-managed/rendered/manifest.yaml | 10 +- .../kubernetes-default/rendered/manifest.yaml | 30 ++-- .../rendered/manifest.yaml | 30 ++-- .../rendered/manifest.yaml | 10 +- .../rendered/manifest.yaml | 30 ++-- .../rendered/manifest.yaml | 4 +- .../rendered/manifest.yaml | 10 +- .../user-cluster-role/rendered/manifest.yaml | 8 +- .../rendered/manifest.yaml | 24 +-- magefile.go | 156 +++++++++--------- 12 files changed, 175 insertions(+), 170 deletions(-) diff --git a/deploy/helm/elastic-agent/Chart.yaml b/deploy/helm/elastic-agent/Chart.yaml index ed0a65b6e2c..9a650d65563 100644 --- a/deploy/helm/elastic-agent/Chart.yaml +++ b/deploy/helm/elastic-agent/Chart.yaml @@ -3,4 +3,5 @@ name: elastic-agent description: Elastic-Agent Helm Chart kubeVersion: ">= 1.27.0-0" type: application -version: 0.0.1 +appVersion: 8.18.0 +version: 8.18.0-beta diff --git a/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml index 289ad3cfd96..488a1840709 100644 --- a/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -20,7 +20,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -34,7 +34,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -48,7 +48,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -100,7 +100,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -389,7 +389,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: 
elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -566,7 +566,7 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -690,7 +690,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -923,7 +923,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1005,7 +1005,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1026,7 +1026,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1047,7 +1047,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1069,7 +1069,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1171,7 +1171,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1239,7 +1239,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml index 75b73b0e265..82094162c40 100644 --- a/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -18,7 +18,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example 
app.kubernetes.io/version: 8.18.0 @@ -38,7 +38,7 @@ kind: ClusterRole metadata: name: agent-nginx-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -144,7 +144,7 @@ kind: ClusterRoleBinding metadata: name: agent-nginx-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -164,7 +164,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml index dc773f42f28..6e96135b2df 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -18,7 +18,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -30,7 +30,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -42,7 +42,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -99,7 +99,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -393,7 +393,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -575,7 +575,7 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -697,7 +697,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -928,7 +928,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - 
helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1008,7 +1008,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1027,7 +1027,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1046,7 +1046,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1066,7 +1066,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1185,7 +1185,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1266,7 +1266,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml index 293daa05358..7347621c8c8 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -18,7 +18,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -30,7 +30,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -42,7 +42,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -99,7 +99,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + 
helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -393,7 +393,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -577,7 +577,7 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -699,7 +699,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -930,7 +930,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1010,7 +1010,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1029,7 +1029,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1048,7 +1048,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1068,7 +1068,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1212,7 +1212,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1293,7 +1293,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml index 3f21fc22042..bbc15db88e5 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: 
elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -18,7 +18,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -110,7 +110,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -190,7 +190,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -210,7 +210,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml index 920c80d585e..1fa1b2ce54f 100644 --- a/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -18,7 +18,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -30,7 +30,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -42,7 +42,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -125,7 +125,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -419,7 +419,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -603,7 +603,7 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -725,7 +725,7 @@ kind: 
ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -956,7 +956,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1036,7 +1036,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1055,7 +1055,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1074,7 +1074,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1094,7 +1094,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1228,7 +1228,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1299,7 +1299,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml index 50e4d436cdb..fc5932afe44 100644 --- a/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -61,7 +61,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml index 6be784d840f..fab0e40c507 100644 --- a/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml +++ 
b/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -18,7 +18,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -182,7 +182,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -262,7 +262,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -282,7 +282,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml index 680b9552cc8..fc33a38afdc 100644 --- a/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -21,7 +21,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -77,7 +77,7 @@ kind: ClusterRoleBinding metadata: name: agent-nginx-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -99,7 +99,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml index f484e6af5dc..1c956194757 100644 --- a/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: 
example app.kubernetes.io/version: 8.18.0 @@ -63,7 +63,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -357,7 +357,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -539,7 +539,7 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -663,7 +663,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -896,7 +896,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -978,7 +978,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -997,7 +997,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1016,7 +1016,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1036,7 +1036,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1155,7 +1155,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 @@ -1236,7 +1236,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-8.18.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 8.18.0 diff --git a/magefile.go b/magefile.go index 32253e50134..17d2aa4c8d8 100644 --- a/magefile.go +++ b/magefile.go @@ -113,25 +113,6 @@ var ( goIntegTestTimeout = 2 * time.Hour // goProvisionAndTestTimeout is the timeout used for both provisioning and running tests. 
 	goProvisionAndTestTimeout = goIntegTestTimeout + 30*time.Minute
-
-	helmChartsValues = []struct {
-		path        string
-		versionKeys []string
-		tagKeys     []string
-	}{
-		// elastic-agent Helm Chart
-		{
-			helmChartPath,
-			[]string{"agent", "version"},
-			[]string{"agent", "image", "tag"},
-		},
-		// edot-collector values file for kube-stack Helm Chart
-		{
-			helmOtelChartPath,
-			[]string{"defaultCRConfig", "image", "tag"},
-			nil,
-		},
-	}
 )
 
 func init() {
@@ -3406,6 +3387,9 @@ type otelDependencies struct {
 
 type Helm mg.Namespace
 
+// RenderExamples runs the equivalent of `helm template` and `helm lint`
+// for the examples of the Elastic Helm chart, which are located in the
+// `deploy/helm/elastic-agent/examples` directory.
 func (Helm) RenderExamples() error {
 	settings := cli.New() // Helm CLI settings
 	actionConfig := &action.Configuration{}
@@ -3486,71 +3470,43 @@ func (Helm) RenderExamples() error {
 	return nil
 }
 
+// UpdateAgentVersion updates the agent version in the Elastic-Agent and EDOT-Collector Helm charts.
 func (Helm) UpdateAgentVersion() error {
-	for _, chart := range helmChartsValues {
-		valuesFile := filepath.Join(chart.path, "values.yaml")
-
-		data, err := os.ReadFile(valuesFile)
-		if err != nil {
-			return fmt.Errorf("failed to read file: %w", err)
-		}
-
-		isTagged, err := devtools.TagContainsCommit()
-		if err != nil {
-			return fmt.Errorf("failed to check if tag contains commit: %w", err)
-		}
-
-		if !isTagged {
-			isTagged = os.Getenv(snapshotEnv) != ""
-		}
-
-		agentVersion := getVersion()
-
-		// Parse YAML into a Node structure because
-		// it maintains comments
-		var rootNode yaml.Node
-		err = yaml.Unmarshal(data, &rootNode)
-		if err != nil {
-			return fmt.Errorf("failed to unmarshal YAML: %w", err)
-		}
-
-		if rootNode.Kind != yaml.DocumentNode {
-			return fmt.Errorf("root node is not a document node")
-		} else if len(rootNode.Content) == 0 {
-			return fmt.Errorf("root node has no content")
-		}
-
-		if err := updateYamlNodes(rootNode.Content[0], agentVersion, chart.versionKeys...); err != nil {
+	agentVersion := bversion.GetParsedAgentPackageVersion().CoreVersion()
+	agentSnapshotVersion := agentVersion + "-SNAPSHOT"
+	// until the Helm chart reaches GA, this keeps the -beta suffix
+	agentChartVersion := agentVersion + "-beta"
+
+	for yamlFile, keyVals := range map[string][]struct {
+		key   string
+		value string
+	}{
+		// values file for elastic-agent Helm Chart
+		filepath.Join(helmChartPath, "values.yaml"): {
+			{"agent.version", agentVersion},
+			// always use the SNAPSHOT version for the image tag
+			// for the chart that resides in the git repo
+			{"agent.image.tag", agentSnapshotVersion},
+		},
+		// Chart.yaml for elastic-agent Helm Chart
+		filepath.Join(helmChartPath, "Chart.yaml"): {
+			{"appVersion", agentVersion},
+			{"version", agentChartVersion},
+		},
+		// edot-collector values file for kube-stack Helm Chart
+		filepath.Join(helmOtelChartPath, "values.yaml"): {
+			{"defaultCRConfig.image.tag", agentVersion},
+		},
+	} {
+		if err := updateYamlFile(yamlFile, keyVals...); err != nil {
 			return fmt.Errorf("failed to update agent version: %w", err)
 		}
-
-		if !isTagged && len(chart.tagKeys) > 0 {
-			if err := updateYamlNodes(rootNode.Content[0], fmt.Sprintf("%s-SNAPSHOT", agentVersion), chart.tagKeys...); err != nil {
-				return fmt.Errorf("failed to update agent image tag: %w", err)
-			}
-		}
-
-		// Truncate values file
-		file, err := os.Create(valuesFile)
-		if err != nil {
-			return fmt.Errorf("failed to open file for writing: %w", err)
-		}
-		defer file.Close()
-
-		// Create a YAML encoder with 2-space indentation
-		encoder := yaml.NewEncoder(file)
-		encoder.SetIndent(2)
-
-		// Encode the updated YAML node back to the file
-		err = encoder.Encode(&rootNode)
-		if err != nil {
-			return fmt.Errorf("failed to encode updated YAML: %w", err)
-		}
 	}
 
 	return nil
 }
 
+// Lint lints the Elastic-Agent Helm chart.
 func (Helm) Lint() error {
 	settings := cli.New() // Helm CLI settings
 	actionConfig := &action.Configuration{}
@@ -3569,6 +3525,54 @@ func (Helm) Lint() error {
 	return nil
 }
 
+func updateYamlFile(path string, keyVal ...struct {
+	key   string
+	value string
+}) error {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return fmt.Errorf("failed to read file: %w", err)
+	}
+
+	// Parse YAML into a Node structure because
+	// it maintains comments
+	var rootNode yaml.Node
+	err = yaml.Unmarshal(data, &rootNode)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal YAML: %w", err)
+	}
+
+	if rootNode.Kind != yaml.DocumentNode {
+		return fmt.Errorf("root node is not a document node")
+	} else if len(rootNode.Content) == 0 {
+		return fmt.Errorf("root node has no content")
+	}
+
+	for _, kv := range keyVal {
+		if err := updateYamlNodes(rootNode.Content[0], kv.value, strings.Split(kv.key, ".")...); err != nil {
+			return fmt.Errorf("failed to update agent version: %w", err)
+		}
+	}
+
+	// Truncate the file before writing the updated tree back
+	file, err := os.Create(path)
+	if err != nil {
+		return fmt.Errorf("failed to open file for writing: %w", err)
+	}
+	defer file.Close()
+
+	// Create a YAML encoder with 2-space indentation
+	encoder := yaml.NewEncoder(file)
+	encoder.SetIndent(2)
+
+	// Encode the updated YAML node back to the file
+	err = encoder.Encode(&rootNode)
+	if err != nil {
+		return fmt.Errorf("failed to encode updated YAML: %w", err)
+	}
+	return nil
+}
+
 func updateYamlNodes(rootNode *yaml.Node, value string, keys ...string) error {
 	if len(keys) == 0 {
 		return fmt.Errorf("no keys provided")
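The key idea in the refactored UpdateAgentVersion above is that every chart file now flows through updateYamlFile, which edits a parsed yaml.Node tree instead of a plain map so that comments in values.yaml and Chart.yaml survive the rewrite. A self-contained sketch of that round-trip, simplified to one flat key (the real updateYamlNodes recurses through dotted paths such as agent.image.tag; setMappingValue below is an illustrative stand-in, not the repository's helper):

package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// setMappingValue walks one level of a YAML mapping node, whose Content
// alternates key and value nodes, and rewrites the scalar under key.
func setMappingValue(mapping *yaml.Node, key, value string) error {
	for i := 0; i+1 < len(mapping.Content); i += 2 {
		if mapping.Content[i].Value == key {
			mapping.Content[i+1].Value = value
			return nil
		}
	}
	return fmt.Errorf("key %q not found", key)
}

func main() {
	src := []byte("# chart version, bumped on release\nversion: 0.0.1\n")

	// Unmarshalling into yaml.Node (instead of map[string]any) keeps the
	// comment attached to the tree, which is why the magefile uses it.
	var root yaml.Node
	if err := yaml.Unmarshal(src, &root); err != nil {
		panic(err)
	}
	if err := setMappingValue(root.Content[0], "version", "8.18.0-beta"); err != nil {
		panic(err)
	}

	enc := yaml.NewEncoder(os.Stdout)
	enc.SetIndent(2) // match the charts' 2-space indentation
	defer enc.Close()
	if err := enc.Encode(&root); err != nil {
		panic(err)
	}
	// Printed output keeps the comment:
	// # chart version, bumped on release
	// version: 8.18.0-beta
}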

From a65b482a2b212f5cafe9b01f77a001a7445b6ff2 Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Wed, 18 Dec 2024 11:13:20 +0200
Subject: [PATCH 2/3] [k8s]: transition integration tests to adapter pattern
 (#6277) (#6358)

* feat: transition kubernetes integration tests to adapter pattern

* fix: count nodes without NoSchedule taints

(cherry picked from commit 67c744fd51e95401ce8de96d27278bb7ed3d3cde)

Co-authored-by: Panos Koutsovasilis
---
 .../kubernetes_agent_service_test.go          |  91 +-
 .../kubernetes_agent_standalone_test.go       | 872 +++++++++---------
 2 files changed, 485 insertions(+), 478 deletions(-)
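The adapter pattern this patch introduces replaces each test's inline cluster setup with a declarative list of steps sharing one signature; a test then just iterates the list, as the call sites below show (`for _, step := range testSteps { step(t, ctx, kCtx, testNamespace) }`). A minimal sketch of the idea, with the *testing.T parameter dropped for brevity (the type and step names here are illustrative stand-ins, not the suite's actual k8sTestStep helpers, and the step bodies are assumptions):

package main

import (
	"context"
	"fmt"
)

// k8sContextSketch stands in for the suite's kubernetes test context
// (client, agent image, namespace helpers); hypothetical for illustration.
type k8sContextSketch struct{}

// testStep mirrors the shape used in the patch: every step receives the
// same shared context and namespace, so tests compose steps per case.
type testStep func(ctx context.Context, kCtx *k8sContextSketch, namespace string)

// stepCreateNamespace returns a step closure; parameters are captured at
// construction time, which is what makes the steps reusable across tests.
func stepCreateNamespace() testStep {
	return func(ctx context.Context, kCtx *k8sContextSketch, namespace string) {
		fmt.Printf("creating namespace %q\n", namespace)
	}
}

func stepCheckAgentStatus(labelSelector string, expectedPods int) testStep {
	return func(ctx context.Context, kCtx *k8sContextSketch, namespace string) {
		fmt.Printf("expecting %d healthy pods matching %q in %q\n",
			expectedPods, labelSelector, namespace)
	}
}

func main() {
	steps := []testStep{
		stepCreateNamespace(),
		stepCheckAgentStatus("app=elastic-agent-standalone", 3),
	}
	kCtx := &k8sContextSketch{}
	for _, step := range steps {
		step(context.Background(), kCtx, "test-namespace")
	}
}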
diff --git a/testing/integration/kubernetes_agent_service_test.go b/testing/integration/kubernetes_agent_service_test.go
index ed479c99702..a66d8547d8a 100644
--- a/testing/integration/kubernetes_agent_service_test.go
+++ b/testing/integration/kubernetes_agent_service_test.go
@@ -7,17 +7,13 @@ package integration
 
 import (
-	"bufio"
-	"bytes"
 	"context"
-	"fmt"
 	"os"
 	"path/filepath"
 	"testing"
 
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/e2e-framework/klient/k8s"
 
 	"github.com/elastic/elastic-agent/pkg/testing/define"
@@ -35,73 +31,38 @@ func TestKubernetesAgentService(t *testing.T) {
 		Group: define.Kubernetes,
 	})
 
+	// read the service agent config
+	serviceAgentYAML, err := os.ReadFile(filepath.Join("testdata", "connectors.agent.yml"))
+	require.NoError(t, err, "failed to read service agent config")
+
 	ctx := context.Background()
 	kCtx := k8sGetContext(t, info)
-	testNamespace := kCtx.getNamespace(t)
-
-	renderedManifest, err := renderKustomize(agentK8SKustomize)
-	require.NoError(t, err, "failed to render kustomize")
-
-	k8sObjects, err := k8sYAMLToObjects(bufio.NewReader(bytes.NewReader(renderedManifest)))
-	require.NoError(t, err, "failed to convert yaml to k8s objects")
-	// add the testNamespace in the k8sObjects
-	k8sObjects = append([]k8s.Object{&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}}, k8sObjects...)
+	schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx)
+	require.NoError(t, err, "error at getting schedulable node count")
+	require.NotZero(t, schedulableNodeCount, "no schedulable Kubernetes nodes found")
 
-	t.Cleanup(func() {
-		err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sObjects...)
-		require.NoError(t, err, "failed to delete k8s namespace")
-	})
-
-	k8sKustomizeAdjustObjects(k8sObjects, testNamespace, "elastic-agent-standalone",
-		func(container *corev1.Container) {
-			// set agent image
-			container.Image = kCtx.agentImage
-			// set ImagePullPolicy to "Never" to avoid pulling the image
-			// as the image is already loaded by the kubernetes provisioner
-			container.ImagePullPolicy = "Never"
-
-			// set Elasticsearch host and API key
-			for idx, env := range container.Env {
-				if env.Name == "ES_HOST" {
-					container.Env[idx].Value = kCtx.esHost
-					container.Env[idx].ValueFrom = nil
-				}
-				if env.Name == "API_KEY" {
-					container.Env[idx].Value = kCtx.esAPIKey
-					container.Env[idx].ValueFrom = nil
+	testSteps := []k8sTestStep{
+		k8sStepCreateNamespace(),
+		k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{
+			agentContainerMemoryLimit: "800Mi",
+		}, func(obj k8s.Object) {
+			// update the configmap to only run the connectors input
+			switch objWithType := obj.(type) {
+			case *corev1.ConfigMap:
+				_, ok := objWithType.Data["agent.yml"]
+				if ok {
+					objWithType.Data["agent.yml"] = string(serviceAgentYAML)
 				}
 			}
-		},
-		func(pod *corev1.PodSpec) {
-			for volumeIdx, volume := range pod.Volumes {
-				// need to update the volume path of the state directory
-				// to match the test namespace
-				if volume.Name == "elastic-agent-state" {
-					hostPathType := corev1.HostPathDirectoryOrCreate
-					pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{
-						Type: &hostPathType,
-						Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", testNamespace),
-					}
-				}
-			}
-		})
-
-	// update the configmap to only run the connectors input
-	serviceAgentYAML, err := os.ReadFile(filepath.Join("testdata", "connectors.agent.yml"))
-	require.NoError(t, err)
-	for _, obj := range k8sObjects {
-		switch objWithType := obj.(type) {
-		case *corev1.ConfigMap:
-			_, ok := objWithType.Data["agent.yml"]
-			if ok {
-				objWithType.Data["agent.yml"] = string(serviceAgentYAML)
-			}
-		}
+		}),
+		k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", map[string]bool{
+			"connectors-py": true,
+		}),
 	}
 
-	k8sKustomizeDeployAgent(t, ctx, kCtx.client, k8sObjects, testNamespace, false, kCtx.logsBasePath,
-		true, map[string]bool{
-			"connectors-py": true,
-		})
+	testNamespace := kCtx.getNamespace(t)
+	for _, step := range testSteps {
+		step(t, ctx, kCtx, testNamespace)
+	}
 }
diff --git a/testing/integration/kubernetes_agent_standalone_test.go b/testing/integration/kubernetes_agent_standalone_test.go
index 86db561edd8..148706f3654 100644
--- a/testing/integration/kubernetes_agent_standalone_test.go
+++ b/testing/integration/kubernetes_agent_standalone_test.go
@@ -77,64 +77,84 @@ func
TestKubernetesAgentStandaloneKustomize(t *testing.T) { Group: define.Kubernetes, }) + ctx := context.Background() kCtx := k8sGetContext(t, info) - renderedManifest, err := renderKustomize(agentK8SKustomize) - require.NoError(t, err, "failed to render kustomize") + schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx) + require.NoError(t, err, "error at getting schedulable node count") + require.NotZero(t, schedulableNodeCount, "no schedulable Kubernetes nodes found") testCases := []struct { - name string - runUser *int64 - runGroup *int64 - capabilitiesDrop []corev1.Capability - capabilitiesAdd []corev1.Capability - runK8SInnerTests bool - skipReason string + name string + skipReason string + steps []k8sTestStep }{ { - "default deployment - rootful agent", - nil, - nil, - nil, - nil, - false, - "", + name: "default deployment - rootful agent", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{ + agentContainerMemoryLimit: "800Mi", + }, nil), + k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil), + }, }, { - "drop ALL capabilities - rootful agent", - int64Ptr(0), - nil, - []corev1.Capability{"ALL"}, - []corev1.Capability{}, - false, - "", + name: "drop ALL capabilities - rootful agent", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{ + agentContainerRunUser: int64Ptr(0), + agentContainerCapabilitiesAdd: []corev1.Capability{}, + agentContainerCapabilitiesDrop: []corev1.Capability{"ALL"}, + agentContainerMemoryLimit: "800Mi", + }, nil), + k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil), + }, }, { - "drop ALL add CHOWN, SETPCAP capabilities - rootful agent", - int64Ptr(0), - nil, - []corev1.Capability{"ALL"}, - []corev1.Capability{"CHOWN", "SETPCAP"}, - true, - "", + name: "drop ALL add CHOWN, SETPCAP capabilities - rootful agent", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{ + agentContainerRunUser: int64Ptr(0), + agentContainerCapabilitiesAdd: []corev1.Capability{"CHOWN", "SETPCAP"}, + agentContainerCapabilitiesDrop: []corev1.Capability{"ALL"}, + agentContainerMemoryLimit: "800Mi", + }, nil), + k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil), + k8sStepRunInnerTests("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone"), + }, }, { - "drop ALL add CHOWN, SETPCAP capabilities - rootless agent", - int64Ptr(1000), // elastic-agent uid - nil, - []corev1.Capability{"ALL"}, - []corev1.Capability{"CHOWN", "SETPCAP", "DAC_READ_SEARCH", "SYS_PTRACE"}, - true, - "", + name: "drop ALL add CHOWN, SETPCAP, DAC_READ_SEARCH, SYS_PTRACE capabilities - rootless agent", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{ + agentContainerRunUser: int64Ptr(1000), + agentContainerRunGroup: int64Ptr(1000), + agentContainerCapabilitiesAdd: []corev1.Capability{"CHOWN", "SETPCAP", "DAC_READ_SEARCH", "SYS_PTRACE"}, + agentContainerCapabilitiesDrop: []corev1.Capability{"ALL"}, + agentContainerMemoryLimit: "800Mi", + }, nil), + k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, 
"elastic-agent-standalone", nil), + k8sStepRunInnerTests("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone"), + }, }, { - "drop ALL add CHOWN, SETPCAP capabilities - rootless agent random uid:gid", - int64Ptr(500), - int64Ptr(500), - []corev1.Capability{"ALL"}, - []corev1.Capability{"CHOWN", "SETPCAP", "DAC_READ_SEARCH", "SYS_PTRACE"}, - true, - "", + name: "drop ALL add CHOWN, SETPCAP, DAC_READ_SEARCH, SYS_PTRACE capabilities - rootless agent random uid:gid", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{ + agentContainerRunUser: int64Ptr(500), + agentContainerRunGroup: int64Ptr(500), + agentContainerCapabilitiesAdd: []corev1.Capability{"CHOWN", "SETPCAP", "DAC_READ_SEARCH", "SYS_PTRACE"}, + agentContainerCapabilitiesDrop: []corev1.Capability{"ALL"}, + agentContainerMemoryLimit: "800Mi", + }, nil), + k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil), + k8sStepRunInnerTests("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone"), + }, }, } @@ -145,72 +165,11 @@ func TestKubernetesAgentStandaloneKustomize(t *testing.T) { t.Skip(tc.skipReason) } - ctx := context.Background() - testNamespace := kCtx.getNamespace(t) - k8sObjects, err := k8sYAMLToObjects(bufio.NewReader(bytes.NewReader(renderedManifest))) - require.NoError(t, err, "failed to convert yaml to k8s objects") - - // add the testNamespace in the beginning of k8sObjects to be created first - k8sObjects = append([]k8s.Object{&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}}, k8sObjects...) - - t.Cleanup(func() { - err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sObjects...) 
- require.NoError(t, err, "failed to delete k8s namespace") - }) - - k8sKustomizeAdjustObjects(k8sObjects, testNamespace, "elastic-agent-standalone", - func(container *corev1.Container) { - // set agent image - container.Image = kCtx.agentImage - // set ImagePullPolicy to "Never" to avoid pulling the image - // as the image is already loaded by the kubernetes provisioner - container.ImagePullPolicy = "Never" - - container.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("800Mi"), - } - - if tc.capabilitiesDrop != nil || tc.capabilitiesAdd != nil || tc.runUser != nil || tc.runGroup != nil { - // set security context - container.SecurityContext = &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Drop: tc.capabilitiesDrop, - Add: tc.capabilitiesAdd, - }, - RunAsUser: tc.runUser, - RunAsGroup: tc.runGroup, - } - } - // set Elasticsearch host and API key - for idx, env := range container.Env { - if env.Name == "ES_HOST" { - container.Env[idx].Value = kCtx.esHost - container.Env[idx].ValueFrom = nil - } - if env.Name == "API_KEY" { - container.Env[idx].Value = kCtx.esAPIKey - container.Env[idx].ValueFrom = nil - } - } - }, - func(pod *corev1.PodSpec) { - for volumeIdx, volume := range pod.Volumes { - // need to update the volume path of the state directory - // to match the test namespace - if volume.Name == "elastic-agent-state" { - hostPathType := corev1.HostPathDirectoryOrCreate - pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{ - Type: &hostPathType, - Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", testNamespace), - } - } - } - }) - - k8sKustomizeDeployAgent(t, ctx, kCtx.client, k8sObjects, testNamespace, tc.runK8SInnerTests, - kCtx.logsBasePath, true, nil) + for _, step := range tc.steps { + step(t, ctx, kCtx, testNamespace) + } }) } } @@ -228,85 +187,47 @@ func TestKubernetesAgentOtel(t *testing.T) { Group: define.Kubernetes, }) + ctx := context.Background() kCtx := k8sGetContext(t, info) - renderedManifest, err := renderKustomize(agentK8SKustomize) - require.NoError(t, err, "failed to render kustomize") + nodeList := corev1.NodeList{} + err := kCtx.client.Resources().List(ctx, &nodeList) + require.NoError(t, err) + + schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx) + require.NoError(t, err, "error at getting schedulable node count") + require.NotZero(t, schedulableNodeCount, "no schedulable Kubernetes nodes found") testCases := []struct { - name string - envAdd []corev1.EnvVar - runK8SInnerTests bool + name string + skipReason string + steps []k8sTestStep }{ { - "run agent in otel mode", - []corev1.EnvVar{ - {Name: "ELASTIC_AGENT_OTEL", Value: "true"}, + name: "run agent in otel mode", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{ + agentContainerMemoryLimit: "800Mi", + agentContainerExtraEnv: []corev1.EnvVar{{Name: "ELASTIC_AGENT_OTEL", Value: "true"}}, + agentContainerArgs: []string{}, // clear default args + }, nil), }, - false, }, } for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - testNamespace := kCtx.getNamespace(t) - - k8sObjects, err := k8sYAMLToObjects(bufio.NewReader(bytes.NewReader(renderedManifest))) - require.NoError(t, err, "failed to convert yaml to k8s objects") - - // add the testNamespace in the k8sObjects - k8sObjects = append([]k8s.Object{&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: 
testNamespace}}}, k8sObjects...) - - t.Cleanup(func() { - err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sObjects...) - require.NoError(t, err, "failed to delete k8s namespace") - }) - - k8sKustomizeAdjustObjects(k8sObjects, testNamespace, "elastic-agent-standalone", - func(container *corev1.Container) { - // set agent image - container.Image = kCtx.agentImage - // set ImagePullPolicy to "Never" to avoid pulling the image - // as the image is already loaded by the kubernetes provisioner - container.ImagePullPolicy = "Never" - - // set Elasticsearch host and API key - for idx, env := range container.Env { - if env.Name == "ES_HOST" { - container.Env[idx].Value = kCtx.esHost - container.Env[idx].ValueFrom = nil - } - if env.Name == "API_KEY" { - container.Env[idx].Value = kCtx.esAPIKey - container.Env[idx].ValueFrom = nil - } - } - - if len(tc.envAdd) > 0 { - container.Env = append(container.Env, tc.envAdd...) - } + if tc.skipReason != "" { + t.Skip(tc.skipReason) + } - // drop arguments overriding default config - container.Args = []string{} - }, - func(pod *corev1.PodSpec) { - for volumeIdx, volume := range pod.Volumes { - // need to update the volume path of the state directory - // to match the test namespace - if volume.Name == "elastic-agent-state" { - hostPathType := corev1.HostPathDirectoryOrCreate - pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{ - Type: &hostPathType, - Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", testNamespace), - } - } - } - }) + testNamespace := kCtx.getNamespace(t) - k8sKustomizeDeployAgent(t, ctx, kCtx.client, k8sObjects, testNamespace, - false, kCtx.logsBasePath, false, nil) + for _, step := range tc.steps { + step(t, ctx, kCtx, testNamespace) + } }) } } @@ -331,282 +252,145 @@ func TestKubernetesAgentHelm(t *testing.T) { err := kCtx.client.Resources().List(ctx, &nodeList) require.NoError(t, err) - totalK8SNodes := len(nodeList.Items) - require.NotZero(t, totalK8SNodes, "No Kubernetes nodes found") + schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx) + require.NoError(t, err, "error at getting schedulable node count") + require.NotZero(t, schedulableNodeCount, "no schedulable Kubernetes nodes found") testCases := []struct { - name string - values map[string]any - atLeastAgentPods int - runK8SInnerTests bool - agentPodLabelSelectors []string + name string + skipReason string + steps []k8sTestStep }{ { name: "helm standalone agent default kubernetes privileged", - values: map[string]any{ - "kubernetes": map[string]any{ - "enabled": true, - }, - "agent": map[string]any{ - "unprivileged": false, - "image": map[string]any{ - "repository": kCtx.agentImageRepo, - "tag": kCtx.agentImageTag, - "pullPolicy": "Never", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "kubernetes": map[string]any{ + "enabled": true, }, - }, - "outputs": map[string]any{ - "default": map[string]any{ - "type": "ESPlainAuthAPI", - "url": kCtx.esHost, - "api_key": kCtx.esAPIKey, + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, }, - }, - }, - runK8SInnerTests: true, - // - perNode Daemonset (totalK8SNodes pods) - // - clusterWide Deployment (1 agent pod) - // - ksmSharded Statefulset (1 agent pod) - atLeastAgentPods: totalK8SNodes + 1 + 1, - agentPodLabelSelectors: []string{ - // name=agent-{preset}-{release} - 
"name=agent-pernode-helm-agent", - "name=agent-clusterwide-helm-agent", - "name=agent-ksmsharded-helm-agent", + "outputs": map[string]any{ + "default": map[string]any{ + "type": "ESPlainAuthAPI", + "url": kCtx.esHost, + "api_key": kCtx.esAPIKey, + }, + }, + }), + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil), + k8sStepCheckAgentStatus("name=agent-clusterwide-helm-agent", 1, "agent", nil), + k8sStepCheckAgentStatus("name=agent-ksmsharded-helm-agent", 1, "agent", nil), + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"), + k8sStepRunInnerTests("name=agent-clusterwide-helm-agent", 1, "agent"), + k8sStepRunInnerTests("name=agent-ksmsharded-helm-agent", 1, "agent"), }, }, { name: "helm standalone agent default kubernetes unprivileged", - values: map[string]any{ - "kubernetes": map[string]any{ - "enabled": true, - }, - "agent": map[string]any{ - "unprivileged": true, - "image": map[string]any{ - "repository": kCtx.agentImageRepo, - "tag": kCtx.agentImageTag, - "pullPolicy": "Never", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "kubernetes": map[string]any{ + "enabled": true, }, - }, - "outputs": map[string]any{ - "default": map[string]any{ - "type": "ESPlainAuthAPI", - "url": kCtx.esHost, - "api_key": kCtx.esAPIKey, + "agent": map[string]any{ + "unprivileged": true, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, }, - }, - }, - runK8SInnerTests: true, - // - perNode Daemonset (totalK8SNodes pods) - // - clusterWide Deployment (1 agent pod) - // - ksmSharded Statefulset (1 agent pod) - atLeastAgentPods: totalK8SNodes + 1 + 1, - agentPodLabelSelectors: []string{ - // name=agent-{preset}-{release} - "name=agent-pernode-helm-agent", - "name=agent-clusterwide-helm-agent", - "name=agent-ksmsharded-helm-agent", + "outputs": map[string]any{ + "default": map[string]any{ + "type": "ESPlainAuthAPI", + "url": kCtx.esHost, + "api_key": kCtx.esAPIKey, + }, + }, + }), + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil), + k8sStepCheckAgentStatus("name=agent-clusterwide-helm-agent", 1, "agent", nil), + k8sStepCheckAgentStatus("name=agent-ksmsharded-helm-agent", 1, "agent", nil), + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"), + k8sStepRunInnerTests("name=agent-clusterwide-helm-agent", 1, "agent"), + k8sStepRunInnerTests("name=agent-ksmsharded-helm-agent", 1, "agent"), }, }, { name: "helm managed agent default kubernetes privileged", - values: map[string]any{ - "agent": map[string]any{ - "unprivileged": false, - "image": map[string]any{ - "repository": kCtx.agentImageRepo, - "tag": kCtx.agentImageTag, - "pullPolicy": "Never", - }, - "fleet": map[string]any{ - "enabled": true, - "url": kCtx.enrollParams.FleetURL, - "token": kCtx.enrollParams.EnrollmentToken, - "preset": "perNode", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": kCtx.enrollParams.FleetURL, + "token": kCtx.enrollParams.EnrollmentToken, + "preset": "perNode", + }, }, - }, - }, - runK8SInnerTests: true, - // - perNode Daemonset (totalK8SNodes pods) - 
atLeastAgentPods: totalK8SNodes, - agentPodLabelSelectors: []string{ - // name=agent-{preset}-{release} - "name=agent-pernode-helm-agent", + }), + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil), + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"), }, }, { name: "helm managed agent default kubernetes unprivileged", - values: map[string]any{ - "agent": map[string]any{ - "unprivileged": true, - "image": map[string]any{ - "repository": kCtx.agentImageRepo, - "tag": kCtx.agentImageTag, - "pullPolicy": "Never", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": true, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": kCtx.enrollParams.FleetURL, + "token": kCtx.enrollParams.EnrollmentToken, + "preset": "perNode", + }, }, - "fleet": map[string]any{ - "enabled": true, - "url": kCtx.enrollParams.FleetURL, - "token": kCtx.enrollParams.EnrollmentToken, - "preset": "perNode", - }, - }, - }, - runK8SInnerTests: true, - // - perNode Daemonset (totalK8SNodes pods) - atLeastAgentPods: totalK8SNodes, - agentPodLabelSelectors: []string{ - // name=agent-{preset}-{release} - "name=agent-pernode-helm-agent", + }), + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil), + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"), }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + if tc.skipReason != "" { + t.Skip(tc.skipReason) + } + ctx := context.Background() testNamespace := kCtx.getNamespace(t) - settings := cli.New() - settings.SetNamespace(testNamespace) - actionConfig := &action.Configuration{} - - helmChart, err := loader.Load(agentK8SHelm) - require.NoError(t, err, "failed to load helm chart") - - err = actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), "", - func(format string, v ...interface{}) {}) - require.NoError(t, err, "failed to init helm action config") - - helmValues := tc.values - - k8sNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}} - - t.Cleanup(func() { - if t.Failed() { - if err := k8sDumpAllPodLogs(ctx, kCtx.client, testNamespace, testNamespace, kCtx.logsBasePath); err != nil { - t.Logf("failed to dump logs: %s", err) - } - } - - uninstallAction := action.NewUninstall(actionConfig) - uninstallAction.Wait = true - _, err = uninstallAction.Run("helm-agent") - if err != nil { - t.Logf("failed to uninstall helm chart: %s", err) - } - - err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sNamespace) - if err != nil { - t.Logf("failed to delete k8s namespace: %s", err) - } - }) - - installAction := action.NewInstall(actionConfig) - installAction.Namespace = testNamespace - installAction.CreateNamespace = true - installAction.UseReleaseName = true - installAction.ReleaseName = "helm-agent" - installAction.Timeout = 2 * time.Minute - installAction.Wait = true - installAction.WaitForJobs = true - _, err = installAction.Run(helmChart, helmValues) - require.NoError(t, err, "failed to install helm chart") - - healthyAgentPods := 0 - for _, podSelector := range tc.agentPodLabelSelectors { - pods := &corev1.PodList{} - err = kCtx.client.Resources(testNamespace).List(ctx, pods, func(opt *metav1.ListOptions) { - opt.LabelSelector = 
podSelector - }) - require.NoError(t, err, "failed to list pods with selector ", podSelector) - - for _, pod := range pods.Items { - var stdout, stderr bytes.Buffer - err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, testNamespace, pod.Name, "agent", map[string]bool{}) - if err != nil { - t.Errorf("failed to check agent status: %v", err) - t.Logf("stdout: %s\n", stdout.String()) - t.Logf("stderr: %s\n", stderr.String()) - t.FailNow() - } - healthyAgentPods++ - - if !tc.runK8SInnerTests { - continue - } - - stdout.Reset() - stderr.Reset() - err := kCtx.client.Resources().ExecInPod(ctx, testNamespace, pod.Name, "agent", - []string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr) - t.Logf("%s k8s-inner-tests output:", pod.Name) - t.Log(stdout.String()) - if err != nil { - t.Log(stderr.String()) - } - require.NoError(t, err, "error at k8s inner tests execution") - } + for _, step := range tc.steps { + step(t, ctx, kCtx, testNamespace) } - - require.GreaterOrEqual(t, healthyAgentPods, tc.atLeastAgentPods, - fmt.Sprintf("at least %d agent containers should be checked", tc.atLeastAgentPods)) }) } } -func k8sKustomizeDeployAgent(t *testing.T, ctx context.Context, client klient.Client, objects []k8s.Object, - namespace string, runK8SInnerTests bool, testlogsBasePath string, checkStatus bool, componentPresence map[string]bool, -) { - err := k8sCreateObjects(ctx, client, k8sCreateOpts{namespace: namespace, wait: true}, objects...) - require.NoError(t, err, "failed to create k8s objects") - - t.Cleanup(func() { - if t.Failed() { - if err := k8sDumpAllPodLogs(ctx, client, namespace, namespace, testlogsBasePath); err != nil { - t.Logf("failed to dump logs: %s", err) - } - } - }) - - pods := &corev1.PodList{} - podsLabelSelector := fmt.Sprintf("app=elastic-agent-standalone") - err = client.Resources(namespace).List(ctx, pods, func(opt *metav1.ListOptions) { - opt.LabelSelector = podsLabelSelector - }) - require.NoError(t, err, "failed to list pods with selector ", podsLabelSelector) - require.NotEmpty(t, pods.Items, "no pods found with selector ", podsLabelSelector) - - for _, pod := range pods.Items { - var stdout, stderr bytes.Buffer - - if checkStatus { - err = k8sCheckAgentStatus(ctx, client, &stdout, &stderr, namespace, pod.Name, "elastic-agent-standalone", componentPresence) - if err != nil { - t.Errorf("failed to check agent status: %v", err) - t.Logf("stdout: %s\n", stdout.String()) - t.Logf("stderr: %s\n", stderr.String()) - t.FailNow() - } - } - - stdout.Reset() - stderr.Reset() - - if runK8SInnerTests { - err := client.Resources().ExecInPod(ctx, namespace, pod.Name, "elastic-agent-standalone", - []string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr) - t.Logf("%s k8s-inner-tests output:", pod.Name) - t.Log(stdout.String()) - if err != nil { - t.Log(stderr.String()) - } - require.NoError(t, err, "error at k8s inner tests execution") - } - } -} - // k8sCheckAgentStatus checks that the agent reports healthy. 
 func k8sCheckAgentStatus(ctx context.Context, client klient.Client, stdout *bytes.Buffer, stderr *bytes.Buffer,
 	namespace string, agentPodName string, containerName string, componentPresence map[string]bool,
@@ -806,8 +590,8 @@ func k8sYAMLToObjects(reader *bufio.Reader) ([]k8s.Object, error) {
 	return objects, nil
 }

-// renderKustomize renders the given kustomize directory to YAML
-func renderKustomize(kustomizePath string) ([]byte, error) {
+// k8sRenderKustomize renders the given kustomize directory to YAML
+func k8sRenderKustomize(kustomizePath string) ([]byte, error) {
 	// Create a file system pointing to the kustomize directory
 	fSys := filesys.MakeFsOnDisk()

@@ -1072,6 +856,38 @@ func (k8sContext) getNamespace(t *testing.T) string {
 	return noSpecialCharsRegexp.ReplaceAllString(testNamespace, "")
 }

+// k8sSchedulableNodeCount returns the number of nodes that are schedulable, i.e. not
+// cordoned and not carrying a NoSchedule taint
+func k8sSchedulableNodeCount(ctx context.Context, kCtx k8sContext) (int, error) {
+	nodeList := corev1.NodeList{}
+	err := kCtx.client.Resources().List(ctx, &nodeList)
+	if err != nil {
+		return 0, err
+	}
+
+	totalSchedulableNodes := 0
+
+	for _, node := range nodeList.Items {
+		if node.Spec.Unschedulable {
+			continue
+		}
+
+		hasNoScheduleTaint := false
+		for _, taint := range node.Spec.Taints {
+			if taint.Effect == corev1.TaintEffectNoSchedule {
+				hasNoScheduleTaint = true
+				break
+			}
+		}
+
+		if hasNoScheduleTaint {
+			continue
+		}
+
+		totalSchedulableNodes++
+	}
+
+	return totalSchedulableNodes, err
+}
+
 // k8sGetContext performs all the necessary checks to get a k8sContext for the current test
 func k8sGetContext(t *testing.T, info *define.Info) k8sContext {
 	agentImage := os.Getenv("AGENT_IMAGE")
@@ -1118,3 +934,233 @@ func k8sGetContext(t *testing.T, info *define.Info) k8sContext {
 		enrollParams: enrollParams,
 	}
 }
+
+// k8sTestStep is a function that performs a single step in a k8s integration test
+type k8sTestStep func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string)
+
+// k8sStepCreateNamespace creates a namespace for the current test and adds a test cleanup that
+// deletes it
+func k8sStepCreateNamespace() k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		k8sNamespace := &corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: namespace,
+			},
+		}
+
+		t.Cleanup(func() {
+			err := k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sNamespace)
+			if err != nil {
+				t.Logf("failed to delete namespace: %v", err)
+			}
+		})
+
+		err := k8sCreateObjects(ctx, kCtx.client, k8sCreateOpts{wait: true}, k8sNamespace)
+		require.NoError(t, err, "failed to create namespace")
+	}
+}
+
+// k8sKustomizeOverrides is a best-effort attempt to make kustomize somewhat flexible; it
+// contains certain handpicked overrides to apply to the k8s objects created from
+// kustomize rendering
+type k8sKustomizeOverrides struct {
+	agentContainerRunUser          *int64
+	agentContainerRunGroup         *int64
+	agentContainerCapabilitiesDrop []corev1.Capability
+	agentContainerCapabilitiesAdd  []corev1.Capability
+	agentContainerExtraEnv         []corev1.EnvVar
+	agentContainerArgs             []string
+	agentContainerMemoryLimit      string
+}
+
+// k8sStepDeployKustomize renders a kustomize manifest and deploys it. It also tries to adjust
+// the k8s objects created from the rendering to match the needs of the current test through
+// k8sKustomizeOverrides. However, this is not as flexible as we would like it to be; as a last
+// resort, the forEachObject callback can be used to further adjust the k8s objects.
+func k8sStepDeployKustomize(kustomizePath string, containerName string, overrides k8sKustomizeOverrides, forEachObject func(object k8s.Object)) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		renderedManifest, err := k8sRenderKustomize(kustomizePath)
+		require.NoError(t, err, "failed to render kustomize")
+
+		objects, err := k8sYAMLToObjects(bufio.NewReader(bytes.NewReader(renderedManifest)))
+		require.NoError(t, err, "failed to parse rendered kustomize")
+
+		if forEachObject != nil {
+			for _, object := range objects {
+				forEachObject(object)
+			}
+		}
+
+		k8sKustomizeAdjustObjects(objects, namespace, containerName,
+			func(container *corev1.Container) {
+				// set agent image
+				container.Image = kCtx.agentImage
+				// set ImagePullPolicy to "Never" to avoid pulling the image
+				// as the image is already loaded by the kubernetes provisioner
+				container.ImagePullPolicy = "Never"
+
+				if overrides.agentContainerMemoryLimit != "" {
+					container.Resources.Limits = corev1.ResourceList{
+						corev1.ResourceMemory: resource.MustParse(overrides.agentContainerMemoryLimit),
+					}
+				}
+
+				// if security context overrides are set then set security context
+				if overrides.agentContainerCapabilitiesDrop != nil || overrides.agentContainerCapabilitiesAdd != nil ||
+					overrides.agentContainerRunUser != nil || overrides.agentContainerRunGroup != nil {
+					// set security context
+					container.SecurityContext = &corev1.SecurityContext{
+						Capabilities: &corev1.Capabilities{
+							Drop: overrides.agentContainerCapabilitiesDrop,
+							Add:  overrides.agentContainerCapabilitiesAdd,
+						},
+						RunAsUser:  overrides.agentContainerRunUser,
+						RunAsGroup: overrides.agentContainerRunGroup,
+					}
+				}
+
+				// set Elasticsearch host and API key
+				for idx, env := range container.Env {
+					if env.Name == "ES_HOST" {
+						container.Env[idx].Value = kCtx.esHost
+						container.Env[idx].ValueFrom = nil
+					}
+					if env.Name == "API_KEY" {
+						container.Env[idx].Value = kCtx.esAPIKey
+						container.Env[idx].ValueFrom = nil
+					}
+				}
+
+				if len(overrides.agentContainerExtraEnv) > 0 {
+					container.Env = append(container.Env, overrides.agentContainerExtraEnv...)
+				}
+
+				if overrides.agentContainerArgs != nil {
+					// drop arguments overriding default config
+					container.Args = []string{}
+				}
+			},
+			func(pod *corev1.PodSpec) {
+				for volumeIdx, volume := range pod.Volumes {
+					// need to update the volume path of the state directory
+					// to match the test namespace
+					if volume.Name == "elastic-agent-state" {
+						hostPathType := corev1.HostPathDirectoryOrCreate
+						pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{
+							Type: &hostPathType,
+							Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", namespace),
+						}
+					}
+				}
+			})
+
+		t.Cleanup(func() {
+			if t.Failed() {
+				if err := k8sDumpAllPodLogs(ctx, kCtx.client, namespace, namespace, kCtx.logsBasePath); err != nil {
+					t.Logf("failed to dump logs: %v", err)
+				}
+			}
+
+			err := k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, objects...)
+			if err != nil {
+				t.Logf("failed to delete objects: %v", err)
+			}
+		})
+
+		err = k8sCreateObjects(ctx, kCtx.client, k8sCreateOpts{wait: true}, objects...)
+		require.NoError(t, err, "failed to create objects")
+	}
+}
+
+// k8sStepCheckAgentStatus checks the status of the agent inside the pods returned by the selector
+func k8sStepCheckAgentStatus(agentPodLabelSelector string, expectedPodNumber int, containerName string, componentPresence map[string]bool) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		perNodePodList := &corev1.PodList{}
+		err := kCtx.client.Resources(namespace).List(ctx, perNodePodList, func(opt *metav1.ListOptions) {
+			opt.LabelSelector = agentPodLabelSelector
+		})
+		require.NoError(t, err, "failed to list pods with selector ", agentPodLabelSelector)
+		require.NotEmpty(t, perNodePodList.Items, "no pods found with selector ", agentPodLabelSelector)
+		require.Equal(t, expectedPodNumber, len(perNodePodList.Items), "unexpected number of pods found with selector ", agentPodLabelSelector)
+
+		for _, pod := range perNodePodList.Items {
+			var stdout, stderr bytes.Buffer
+			err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, namespace, pod.Name, containerName, componentPresence)
+			if err != nil {
+				t.Errorf("failed to check agent status %s: %v", pod.Name, err)
+				t.Logf("stdout: %s\n", stdout.String())
+				t.Logf("stderr: %s\n", stderr.String())
+				t.FailNow()
+			}
+		}
+	}
+}
+
+// k8sStepRunInnerTests invokes the k8s inner tests inside the pods returned by the selector. Note that this
+// step requires the agent image to be built with the testing framework, as that is the point where the binary
+// for the inner tests is copied into the image.
+func k8sStepRunInnerTests(agentPodLabelSelector string, expectedPodNumber int, containerName string) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		perNodePodList := &corev1.PodList{}
+		err := kCtx.client.Resources(namespace).List(ctx, perNodePodList, func(opt *metav1.ListOptions) {
+			opt.LabelSelector = agentPodLabelSelector
+		})
+		require.NoError(t, err, "failed to list pods with selector ", agentPodLabelSelector)
+		require.NotEmpty(t, perNodePodList.Items, "no pods found with selector ", agentPodLabelSelector)
+		require.Equal(t, expectedPodNumber, len(perNodePodList.Items), "unexpected number of pods found with selector ", agentPodLabelSelector)
+
+		for _, pod := range perNodePodList.Items {
+			var stdout, stderr bytes.Buffer
+			err = kCtx.client.Resources().ExecInPod(ctx, namespace, pod.Name, containerName,
+				[]string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr)
+			t.Logf("%s k8s-inner-tests output:", pod.Name)
+			t.Log(stdout.String())
+			if err != nil {
+				t.Log(stderr.String())
+			}
+			require.NoError(t, err, "error at k8s inner tests execution")
+		}
+	}
+}
+
+// k8sStepHelmDeploy deploys a helm chart with the given values and the release name
+func k8sStepHelmDeploy(chartPath string, releaseName string, values map[string]any) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		settings := cli.New()
+		settings.SetNamespace(namespace)
+		actionConfig := &action.Configuration{}
+
+		helmChart, err := loader.Load(chartPath)
+		require.NoError(t, err, "failed to load helm chart")
+
+		err = actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), "",
+			func(format string, v ...interface{}) {})
+		require.NoError(t, err, "failed to init helm action config")
+
+		t.Cleanup(func() {
+			if t.Failed() {
+				if err := k8sDumpAllPodLogs(ctx, kCtx.client, namespace, namespace, kCtx.logsBasePath); err != nil {
+					t.Logf("failed to dump logs: %v", err)
+				}
+			}
+
+			uninstallAction := action.NewUninstall(actionConfig)
+			uninstallAction.Wait = true
+			_, err = uninstallAction.Run(releaseName)
+			if err != nil {
+				t.Logf("failed to uninstall helm chart: %v", err)
+			}
+		})
+
+		installAction := action.NewInstall(actionConfig)
+		installAction.Namespace = namespace
+		installAction.CreateNamespace = true
+		installAction.UseReleaseName = true
+		installAction.ReleaseName = releaseName
+		installAction.Timeout = 2 * time.Minute
+		installAction.Wait = true
+		installAction.WaitForJobs = true
+		_, err = installAction.Run(helmChart, values)
+		require.NoError(t, err, "failed to install helm chart")
+	}
+}
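
[Illustration, not part of either patch] The refactor above turns each Helm test scenario into an ordered list of composable k8sTestStep closures that share a namespace and a k8sContext, so a new check can be added without touching the install/uninstall plumbing. As a hedged sketch only: a custom step written against the k8sTestStep signature introduced in this patch might look like the following. k8sStepLogPodNames is a hypothetical name; the only APIs used are the kCtx.client list call and require helpers already exercised by k8sStepCheckAgentStatus above.

// k8sStepLogPodNames is a hypothetical custom step (illustration only): it
// lists the pods matching a label selector in the test namespace and logs
// their names, relying solely on helpers defined in the diff above.
func k8sStepLogPodNames(agentPodLabelSelector string) k8sTestStep {
	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
		podList := &corev1.PodList{}
		err := kCtx.client.Resources(namespace).List(ctx, podList, func(opt *metav1.ListOptions) {
			opt.LabelSelector = agentPodLabelSelector
		})
		require.NoError(t, err, "failed to list pods with selector ", agentPodLabelSelector)
		for _, pod := range podList.Items {
			t.Logf("pod matching %q: %s", agentPodLabelSelector, pod.Name)
		}
	}
}

Composed into a test case, such a step would simply be appended to the steps slice, e.g. after k8sStepHelmDeploy(agentK8SHelm, "helm-agent", values) and before the k8sStepCheckAgentStatus calls (values here stands for the per-case map[string]any shown in the diff).
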
From 0ee7b18aa65cfe1e299594e13e7b5561474b3857 Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Wed, 18 Dec 2024 13:34:51 +0000
Subject: [PATCH 3/3] Update golang.org/x/crypto to v0.31.0 (#6336) (#6351)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

(cherry picked from commit 04c4b3846bc3c2d889203b6629dfb4f0a988758a)

# Conflicts:
#	go.mod

Co-authored-by: Mikołaj Świątek
---
 NOTICE.txt | 24 ++++++++++++------------
 go.mod     | 12 ++++++------
 go.sum     | 24 ++++++++++++------------
 3 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/NOTICE.txt b/NOTICE.txt
index ec64c458397..3f31b51bf89 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -16092,11 +16092,11 @@ THE SOFTWARE.

 --------------------------------------------------------------------------------
 Dependency : golang.org/x/crypto
-Version: v0.29.0
+Version: v0.31.0
 Licence type (autodetected): BSD-3-Clause
 --------------------------------------------------------------------------------

-Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.29.0/LICENSE:
+Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.31.0/LICENSE:

 Copyright 2009 The Go Authors.

@@ -16129,11 +16129,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 --------------------------------------------------------------------------------
 Dependency : golang.org/x/exp
-Version: v0.0.0-20240904232852-e7e105dedf7e
+Version: v0.0.0-20240719175910-8a7402abbf56
 Licence type (autodetected): BSD-3-Clause
 --------------------------------------------------------------------------------

-Contents of probable licence file $GOMODCACHE/golang.org/x/exp@v0.0.0-20240904232852-e7e105dedf7e/LICENSE:
+Contents of probable licence file $GOMODCACHE/golang.org/x/exp@v0.0.0-20240719175910-8a7402abbf56/LICENSE:

 Copyright 2009 The Go Authors.

@@ -16166,11 +16166,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 --------------------------------------------------------------------------------
 Dependency : golang.org/x/sync
-Version: v0.9.0
+Version: v0.10.0
 Licence type (autodetected): BSD-3-Clause
 --------------------------------------------------------------------------------

-Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.9.0/LICENSE:
+Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.10.0/LICENSE:

 Copyright 2009 The Go Authors.

@@ -16203,11 +16203,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 --------------------------------------------------------------------------------
 Dependency : golang.org/x/sys
-Version: v0.27.0
+Version: v0.28.0
 Licence type (autodetected): BSD-3-Clause
 --------------------------------------------------------------------------------

-Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.27.0/LICENSE:
+Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.28.0/LICENSE:

 Copyright 2009 The Go Authors.

@@ -16240,11 +16240,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 --------------------------------------------------------------------------------
 Dependency : golang.org/x/term
-Version: v0.26.0
+Version: v0.27.0
 Licence type (autodetected): BSD-3-Clause
 --------------------------------------------------------------------------------

-Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.26.0/LICENSE:
+Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.27.0/LICENSE:

 Copyright 2009 The Go Authors.

@@ -16277,11 +16277,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 --------------------------------------------------------------------------------
 Dependency : golang.org/x/text
-Version: v0.20.0
+Version: v0.21.0
 Licence type (autodetected): BSD-3-Clause
 --------------------------------------------------------------------------------

-Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.20.0/LICENSE:
+Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.21.0/LICENSE:

 Copyright 2009 The Go Authors.

diff --git a/go.mod b/go.mod
index 6b03750f69d..5c87d735018 100644
--- a/go.mod
+++ b/go.mod
@@ -68,12 +68,12 @@ require (
 	go.opentelemetry.io/collector/component/componentstatus v0.115.0
 	go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/crypto v0.29.0
-	golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
-	golang.org/x/sync v0.9.0
-	golang.org/x/sys v0.27.0
-	golang.org/x/term v0.26.0
-	golang.org/x/text v0.20.0
+	golang.org/x/crypto v0.31.0
+	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+	golang.org/x/sync v0.10.0
+	golang.org/x/sys v0.28.0
+	golang.org/x/term v0.27.0
+	golang.org/x/text v0.21.0
 	golang.org/x/time v0.6.0
 	golang.org/x/tools v0.25.0
 	google.golang.org/grpc v1.67.1
diff --git a/go.sum b/go.sum
index 8cb04d71349..cbc8bdd4773 100644
--- a/go.sum
+++ b/go.sum
@@ -1737,8 +1737,8 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
 golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
-golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
-golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1749,8 +1749,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk=
-golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1853,8 +1853,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
-golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1934,8 +1934,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
-golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1950,8 +1950,8 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
 golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
-golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
-golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1967,8 +1967,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
-golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=