diff --git a/NOTICE.txt b/NOTICE.txt index 35c42f0f264..ad16f53b718 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1319,11 +1319,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.17.4 +Version: v0.17.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.17.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.17.5/LICENSE: Apache License Version 2.0, January 2004 @@ -1952,11 +1952,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transpo -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-elasticsearch/v8 -Version: v8.16.0 +Version: v8.17.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.16.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.17.0/LICENSE: Apache License Version 2.0, January 2004 @@ -16092,11 +16092,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : golang.org/x/crypto -Version: v0.29.0 +Version: v0.31.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.29.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.31.0/LICENSE: Copyright 2009 The Go Authors. @@ -16203,11 +16203,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.27.0 +Version: v0.28.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.27.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.28.0/LICENSE: Copyright 2009 The Go Authors. @@ -16240,11 +16240,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/term -Version: v0.26.0 +Version: v0.27.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.26.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.27.0/LICENSE: Copyright 2009 The Go Authors. @@ -16277,11 +16277,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/text -Version: v0.20.0 +Version: v0.21.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.20.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.21.0/LICENSE: Copyright 2009 The Go Authors. 
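The golang.org/x bumps above move in lockstep (crypto v0.31.0, sys v0.28.0, term v0.27.0, text v0.21.0); x/crypto v0.31.0 is the release carrying the December 2024 security fix for the ssh package (CVE-2024-45337), which presumably motivated the grouped update. Every such go.mod bump must be mirrored in NOTICE.txt, and drift between the two files is easy to miss in review. Below is a minimal sketch of a sync check — not part of this PR — assuming go.mod and NOTICE.txt sit in the working directory and relying on the `Dependency :` / `Version:` layout visible in these hunks:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/mod/modfile"
)

// Hypothetical drift check: every module recorded in go.mod should appear
// in NOTICE.txt with the same version. File locations and the NOTICE line
// format are assumptions based on the entries visible in this diff.
func main() {
	modData, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	mf, err := modfile.Parse("go.mod", modData, nil)
	if err != nil {
		panic(err)
	}
	noticeData, err := os.ReadFile("NOTICE.txt")
	if err != nil {
		panic(err)
	}
	notice := string(noticeData)
	for _, req := range mf.Require {
		// The trailing newline keeps "pipeline" from matching "pipeline/pipelineprofiles".
		header := "Dependency : " + req.Mod.Path + "\n"
		idx := strings.Index(notice, header)
		if idx < 0 {
			fmt.Printf("missing NOTICE entry: %s\n", req.Mod.Path)
			continue
		}
		// In the NOTICE format, the version line directly follows the header.
		rest := notice[idx+len(header):]
		if !strings.HasPrefix(rest, "Version: "+req.Mod.Version+"\n") {
			fmt.Printf("version drift for %s: go.mod has %s\n", req.Mod.Path, req.Mod.Version)
		}
	}
}
```

Run after a dependency update, a check like this would flag any NOTICE entry whose version lags go.mod — exactly the class of mismatch these hunks correct by hand.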
@@ -89358,11 +89358,11 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdat -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/collector/pipeline -Version: v0.115.0 +Version: v0.116.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pipeline@v0.115.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pipeline@v0.116.0/LICENSE: Apache License @@ -89570,11 +89570,11 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pipe -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/collector/pipeline/pipelineprofiles -Version: v0.115.0 +Version: v0.116.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pipeline/pipelineprofiles@v0.115.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pipeline/pipelineprofiles@v0.116.0/LICENSE: Apache License diff --git a/changelog/fragments/1732043830-Fix-audit-unenroll-call-when-running-fleet-server.yaml b/changelog/fragments/1732043830-Fix-audit-unenroll-call-when-running-fleet-server.yaml new file mode 100644 index 00000000000..5cd1cb4fc73 --- /dev/null +++ b/changelog/fragments/1732043830-Fix-audit-unenroll-call-when-running-fleet-server.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; an 80ish-character description of the change. +summary: Fix audit/unenroll call when running fleet-server + +# Long description; in case the summary is not enough to describe the change +# this field accommodates a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +description: Fix the call to the audit/unenroll endpoint that occurs on uninstall when the fleet-server is running locally. + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: fleet-server + +# PR URL; optional; the PR number that added the changeset. +# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/6085 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
+issue: https://github.com/elastic/elastic-agent/issues/5752 diff --git a/changelog/fragments/1733936340-add-retries-for-download-upgrade-verifiers.yaml b/changelog/fragments/1733936340-add-retries-for-download-upgrade-verifiers.yaml new file mode 100644 index 00000000000..29ece746651 --- /dev/null +++ b/changelog/fragments/1733936340-add-retries-for-download-upgrade-verifiers.yaml @@ -0,0 +1,30 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; an 80ish-character description of the change. +summary: Added retries for requesting download verifiers when upgrading the agent + +# Long description; in case the summary is not enough to describe the change +# this field accommodates a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: "elastic-agent" +# PR URL; optional; the PR number that added the changeset. +# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/6276 +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present, it is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/changelog/fragments/1734098868-prevent-leaking-secrets-when-logging-component-model.yaml b/changelog/fragments/1734098868-prevent-leaking-secrets-when-logging-component-model.yaml new file mode 100644 index 00000000000..1a8c13ff8ba --- /dev/null +++ b/changelog/fragments/1734098868-prevent-leaking-secrets-when-logging-component-model.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: enhancement + +# Change summary; an 80ish-character description of the change. +summary: Prevent leaking secrets when logging the component model + +# Long description; in case the summary is not enough to describe the change +# this field accommodates a description without length limits.
+# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: elastic-agent + +# PR URL; optional; the PR number that added the changeset. +# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: https://github.com/owner/repo/1234 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present, it is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/deploy/helm/elastic-agent/Chart.yaml b/deploy/helm/elastic-agent/Chart.yaml index ed0a65b6e2c..1c76cd5095c 100644 --- a/deploy/helm/elastic-agent/Chart.yaml +++ b/deploy/helm/elastic-agent/Chart.yaml @@ -3,4 +3,5 @@ name: elastic-agent description: Elastic-Agent Helm Chart kubeVersion: ">= 1.27.0-0" type: application -version: 0.0.1 +appVersion: 9.0.0 +version: 9.0.0-beta diff --git a/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml index eaa1de45efc..5fc6c5f1c6d 100644 --- a/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -20,7 +20,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -34,7 +34,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -48,7 +48,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -100,7 +100,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -389,7 +389,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -566,7 +566,7 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -690,7
+690,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -923,7 +923,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1005,7 +1005,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1026,7 +1026,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1047,7 +1047,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1069,7 +1069,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1171,7 +1171,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1239,7 +1239,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml index bc414e82959..981128216cd 100644 --- a/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -18,7 +18,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -38,7 +38,7 @@ kind: ClusterRole metadata: name: agent-nginx-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -144,7 +144,7 @@ kind: ClusterRoleBinding metadata: name: agent-nginx-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + 
helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -164,7 +164,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml index 0a97ac7514e..918d3797f46 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -18,7 +18,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -30,7 +30,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -42,7 +42,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -99,7 +99,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -393,7 +393,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -575,7 +575,7 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -697,7 +697,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -928,7 +928,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1008,7 +1008,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example 
app.kubernetes.io/version: 9.0.0 @@ -1027,7 +1027,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1046,7 +1046,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1066,7 +1066,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1185,7 +1185,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1266,7 +1266,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml index f1c1568bd86..941a2f89621 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -18,7 +18,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -30,7 +30,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -42,7 +42,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -99,7 +99,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -393,7 +393,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -577,7 +577,7 @@ kind: ClusterRole metadata: name: 
agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -699,7 +699,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -930,7 +930,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1010,7 +1010,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1029,7 +1029,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1048,7 +1048,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1068,7 +1068,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1212,7 +1212,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1293,7 +1293,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml index 68addc0279f..f1d17f4c929 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -18,7 +18,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -110,7 +110,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: 
elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -190,7 +190,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -210,7 +210,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml index 125cd1184a1..d1823fc104b 100644 --- a/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -18,7 +18,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -30,7 +30,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -42,7 +42,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -125,7 +125,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -419,7 +419,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -603,7 +603,7 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -725,7 +725,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -956,7 +956,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 
9.0.0 @@ -1036,7 +1036,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1055,7 +1055,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1074,7 +1074,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1094,7 +1094,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1228,7 +1228,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1299,7 +1299,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml index 8b6eba6ec5f..1654f966cc8 100644 --- a/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -61,7 +61,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml index 705c78805a3..cc8f9281ec1 100644 --- a/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -18,7 +18,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example 
app.kubernetes.io/version: 9.0.0 @@ -182,7 +182,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -262,7 +262,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -282,7 +282,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml index 135b5420341..c878dcd71aa 100644 --- a/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -21,7 +21,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -77,7 +77,7 @@ kind: ClusterRoleBinding metadata: name: agent-nginx-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -99,7 +99,7 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml index 18d944f3a4f..dc1b3ee246e 100644 --- a/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml @@ -6,7 +6,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -63,7 +63,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -357,7 +357,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -539,7 +539,7 @@ kind: 
ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -663,7 +663,7 @@ kind: ClusterRole metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -896,7 +896,7 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -978,7 +978,7 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -997,7 +997,7 @@ kind: ClusterRoleBinding metadata: name: agent-ksmSharded-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1016,7 +1016,7 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1036,7 +1036,7 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1155,7 +1155,7 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 @@ -1236,7 +1236,7 @@ metadata: name: agent-ksmsharded-example namespace: "default" labels: - helm.sh/chart: elastic-agent-0.0.1 + helm.sh/chart: elastic-agent-9.0.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example app.kubernetes.io/version: 9.0.0 diff --git a/go.mod b/go.mod index 7632e2a481a..bd02c18164c 100644 --- a/go.mod +++ b/go.mod @@ -16,10 +16,10 @@ require ( github.com/elastic/beats/v7 v7.0.0-alpha2.0.20241202144630-0eb63c16f21a github.com/elastic/elastic-agent-autodiscover v0.9.0 github.com/elastic/elastic-agent-client/v7 v7.17.0 - github.com/elastic/elastic-agent-libs v0.17.4 + github.com/elastic/elastic-agent-libs v0.17.5 github.com/elastic/elastic-agent-system-metrics v0.11.4 github.com/elastic/elastic-transport-go/v8 v8.6.0 - github.com/elastic/go-elasticsearch/v8 v8.16.0 + github.com/elastic/go-elasticsearch/v8 v8.17.0 github.com/elastic/go-licenser v0.4.2 github.com/elastic/go-sysinfo v1.15.0 github.com/elastic/go-ucfg v0.8.8 @@ -68,12 +68,12 @@ require ( go.opentelemetry.io/collector/component/componentstatus v0.115.0 go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.29.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/sync v0.10.0 - 
golang.org/x/sys v0.27.0 - golang.org/x/term v0.26.0 - golang.org/x/text v0.20.0 + golang.org/x/sys v0.28.0 + golang.org/x/term v0.27.0 + golang.org/x/text v0.21.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.25.0 google.golang.org/api v0.199.0 @@ -543,8 +543,8 @@ require ( go.opentelemetry.io/collector/pdata v1.21.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/pipeline v0.116.0 // indirect + go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.116.0 // indirect go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0 // indirect go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 // indirect go.opentelemetry.io/collector/processor/processortest v0.115.0 // indirect diff --git a/go.sum b/go.sum index c3191eebe2d..b1a8b923b7b 100644 --- a/go.sum +++ b/go.sum @@ -444,8 +444,8 @@ github.com/elastic/elastic-agent-autodiscover v0.9.0 h1:+iWIKh0u3e8I+CJa3FfWe9h0 github.com/elastic/elastic-agent-autodiscover v0.9.0/go.mod h1:5iUxLHhVdaGSWYTveSwfJEY4RqPXTG13LPiFoxcpFd4= github.com/elastic/elastic-agent-client/v7 v7.17.0 h1:TPLrEHF4kJ3RkmQzZPffrniY4WeW4vriHZbOAzM1hFo= github.com/elastic/elastic-agent-client/v7 v7.17.0/go.mod h1:6h+f9QdIr3GO2ODC0Y8+aEXRwzbA5W4eV4dd/67z7nI= -github.com/elastic/elastic-agent-libs v0.17.4 h1:kWK5Kn2EQjM97yHqbeXv+cFAIti4IiI9Qj8huM+lZzE= -github.com/elastic/elastic-agent-libs v0.17.4/go.mod h1:5CR02awPrBr+tfmjBBK+JI+dMmHNQjpVY24J0wjbC7M= +github.com/elastic/elastic-agent-libs v0.17.5 h1:oyv5BohMia+49tZnsOmTyRWp5LoZbH8iOmGa7c4TqTs= +github.com/elastic/elastic-agent-libs v0.17.5/go.mod h1:5CR02awPrBr+tfmjBBK+JI+dMmHNQjpVY24J0wjbC7M= github.com/elastic/elastic-agent-system-metrics v0.11.4 h1:Z/8CML5RKvGpi6/QUFok1K3EriBAv2kUAXnsk8hCifk= github.com/elastic/elastic-agent-system-metrics v0.11.4/go.mod h1:TTW2ysv78uHBQ68hG8TXiaX1m6f29ZHgGWb8XONYsU8= github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= @@ -458,8 +458,8 @@ github.com/elastic/go-docappender/v2 v2.3.2 h1:FJhYgq2DpCaxGaZUquc75dauEzWTWOyWU github.com/elastic/go-docappender/v2 v2.3.2/go.mod h1:5URybRUfmexRMtM/lwvcIRLje3Gsrj15qiiLm41gDrc= github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo= github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= -github.com/elastic/go-elasticsearch/v8 v8.16.0 h1:f7bR+iBz8GTAVhwyFO3hm4ixsz2eMaEy0QroYnXV3jE= -github.com/elastic/go-elasticsearch/v8 v8.16.0/go.mod h1:lGMlgKIbYoRvay3xWBeKahAiJOgmFDsjZC39nmO3H64= +github.com/elastic/go-elasticsearch/v8 v8.17.0 h1:e9cWksE/Fr7urDRmGPGp47Nsp4/mvNOrU8As1l2HQQ0= +github.com/elastic/go-elasticsearch/v8 v8.17.0/go.mod h1:lGMlgKIbYoRvay3xWBeKahAiJOgmFDsjZC39nmO3H64= github.com/elastic/go-grok v0.3.1 h1:WEhUxe2KrwycMnlvMimJXvzRa7DoByJB4PVUIE1ZD/U= github.com/elastic/go-grok v0.3.1/go.mod h1:n38ls8ZgOboZRgKcjMY8eFeZFMmcL9n2lP0iHhIDk64= github.com/elastic/go-licenser v0.4.2 h1:bPbGm8bUd8rxzSswFOqvQh1dAkKGkgAmrPxbUi+Y9+A= @@ -1622,10 +1622,10 @@ go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtit go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= go.opentelemetry.io/collector/pdata/testdata v0.115.0 
h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 h1:3l9ruCAOrssTUDnyChKNzHWOdTtfThnYaoPZ1/+5sD0= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0/go.mod h1:2Myg+law/5lcezo9PhhZ0wjCaLYdGK24s1jDWbSW9VY= +go.opentelemetry.io/collector/pipeline v0.116.0 h1:o8eKEuWEszmRpfShy7ElBoQ3Jo6kCi9ucm3yRgdNb9s= +go.opentelemetry.io/collector/pipeline v0.116.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.116.0 h1:vRdnwIU40bYtxntVOmxg4Bhrh9QVKtx5wwlxK21rc1s= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.116.0/go.mod h1:KxDMUOfbVy8lzZ85CZEG3gCJEYMyWiBKdN+HWUwQWTM= go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 h1:dgw1jcE/YVFTs41b3Y7SerU3BBSyMEE93AYV+BAxR8E= @@ -1736,8 +1736,8 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1933,8 +1933,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1949,8 +1949,8 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.19.0/go.mod 
h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1966,8 +1966,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index 3c899735da7..718192f55f6 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -1400,6 +1400,7 @@ func (c *Coordinator) refreshComponentModel(ctx context.Context) (err error) { } c.logger.Info("Updating running component model") + c.logger.With("components", model.Components).Debug("Updating running component model") c.runtimeMgr.Update(model) return nil } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go index 7a3f0509a8a..6ca3ea015bb 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go @@ -5,6 +5,7 @@ package composed import ( + "context" goerrors "errors" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" @@ -39,11 +40,11 @@ func NewVerifier(log *logger.Logger, verifiers ...download.Verifier) *Verifier { } // Verify checks the package from configured source. -func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { +func (v *Verifier) Verify(ctx context.Context, a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { var errs []error for _, verifier := range v.vv { - e := verifier.Verify(a, version, skipDefaultPgp, pgpBytes...) + e := verifier.Verify(ctx, a, version, skipDefaultPgp, pgpBytes...) 
if e == nil { // Success return nil diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go index dcad62b7cef..ad3e6ffe749 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go @@ -5,6 +5,7 @@ package composed import ( + "context" "errors" "testing" @@ -24,7 +25,7 @@ func (d *ErrorVerifier) Name() string { return "error" } -func (d *ErrorVerifier) Verify(artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) error { +func (d *ErrorVerifier) Verify(context.Context, artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) error { d.called = true return errors.New("failing") } @@ -39,7 +40,7 @@ func (d *FailVerifier) Name() string { return "fail" } -func (d *FailVerifier) Verify(artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) error { +func (d *FailVerifier) Verify(context.Context, artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) error { d.called = true return &download.InvalidSignatureError{File: "", Err: errors.New("invalid signature")} } @@ -54,7 +55,7 @@ func (d *SuccVerifier) Name() string { return "succ" } -func (d *SuccVerifier) Verify(artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) error { +func (d *SuccVerifier) Verify(context.Context, artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) error { d.called = true return nil } @@ -90,7 +91,7 @@ func TestVerifier(t *testing.T) { testVersion := agtversion.NewParsedSemVer(1, 2, 3, "", "") for _, tc := range testCases { d := NewVerifier(log, tc.verifiers[0], tc.verifiers[1], tc.verifiers[2]) - err := d.Verify(artifact.Artifact{Name: "a", Cmd: "a", Artifact: "a/a"}, *testVersion, false) + err := d.Verify(context.Background(), artifact.Artifact{Name: "a", Cmd: "a", Artifact: "a/a"}, *testVersion, false) assert.Equal(t, tc.expectedResult, err == nil) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go index 210905f2047..4d52e61d48e 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go @@ -5,6 +5,7 @@ package fs import ( + "context" "fmt" "net/http" "os" @@ -65,7 +66,7 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte) (*Veri // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. 
-func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { +func (v *Verifier) Verify(ctx context.Context, a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch()) if err != nil { return fmt.Errorf("could not get artifact name: %w", err) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go index f27cd899a84..18db67f7b5d 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go @@ -29,12 +29,11 @@ import ( var testVersion = agtversion.NewParsedSemVer(7, 5, 1, "", "") -var ( - agentSpec = artifact.Artifact{ - Name: "Elastic Agent", - Cmd: "elastic-agent", - Artifact: "beat/elastic-agent"} -) +var agentSpec = artifact.Artifact{ + Name: "Elastic Agent", + Cmd: "elastic-agent", + Artifact: "beat/elastic-agent", +} func TestFetchVerify(t *testing.T) { // See docs/pgp-sign-verify-artifact.md for how to generate a key, export @@ -47,7 +46,8 @@ func TestFetchVerify(t *testing.T) { targetPath := filepath.Join("testdata", "download") ctx := context.Background() a := artifact.Artifact{ - Name: "elastic-agent", Cmd: "elastic-agent", Artifact: "beats/elastic-agent"} + Name: "elastic-agent", Cmd: "elastic-agent", Artifact: "beats/elastic-agent", + } version := agtversion.NewParsedSemVer(8, 0, 0, "", "") filename := "elastic-agent-8.0.0-darwin-x86_64.tar.gz" @@ -80,7 +80,7 @@ func TestFetchVerify(t *testing.T) { // first download verify should fail: // download skipped, as invalid package is prepared upfront // verify fails and cleans download - err = verifier.Verify(a, *version, false) + err = verifier.Verify(ctx, a, *version, false) var checksumErr *download.ChecksumMismatchError require.ErrorAs(t, err, &checksumErr) @@ -109,7 +109,7 @@ func TestFetchVerify(t *testing.T) { _, err = os.Stat(ascTargetFilePath) require.NoError(t, err) - err = verifier.Verify(a, *version, false) + err = verifier.Verify(ctx, a, *version, false) require.NoError(t, err) // Bad GPG public key. @@ -126,7 +126,7 @@ func TestFetchVerify(t *testing.T) { // Missing .asc file. { - err = verifier.Verify(a, *version, false) + err = verifier.Verify(ctx, a, *version, false) require.Error(t, err) // Don't delete these files when GPG validation failure. 
@@ -139,7 +139,7 @@ func TestFetchVerify(t *testing.T) { err = os.WriteFile(targetFilePath+".asc", []byte("bad sig"), 0o600) require.NoError(t, err) - err = verifier.Verify(a, *version, false) + err = verifier.Verify(ctx, a, *version, false) var invalidSigErr *download.InvalidSignatureError assert.ErrorAs(t, err, &invalidSigErr) @@ -157,7 +157,8 @@ func prepareFetchVerifyTests( targetDir, filename, targetFilePath, - hashTargetFilePath string) error { + hashTargetFilePath string, +) error { sourceFilePath := filepath.Join(dropPath, filename) hashSourceFilePath := filepath.Join(dropPath, filename+".sha512") @@ -202,6 +203,7 @@ func TestVerify(t *testing.T) { for _, tc := range tt { t.Run(tc.Name, func(t *testing.T) { + ctx := context.Background() log, obs := loggertest.New("TestVerify") targetDir := t.TempDir() @@ -220,7 +222,7 @@ func TestVerify(t *testing.T) { pgpKey := prepareTestCase(t, agentSpec, testVersion, config) testClient := NewDownloader(config) - artifactPath, err := testClient.Download(context.Background(), agentSpec, testVersion) + artifactPath, err := testClient.Download(ctx, agentSpec, testVersion) require.NoError(t, err, "fs.Downloader could not download artifacts") _, err = testClient.DownloadAsc(context.Background(), agentSpec, *testVersion) require.NoError(t, err, "fs.Downloader could not download artifacts .asc file") @@ -231,7 +233,7 @@ func TestVerify(t *testing.T) { testVerifier, err := NewVerifier(log, config, pgpKey) require.NoError(t, err) - err = testVerifier.Verify(agentSpec, *testVersion, false, tc.RemotePGPUris...) + err = testVerifier.Verify(ctx, agentSpec, *testVersion, false, tc.RemotePGPUris...) require.NoError(t, err) // log message informing remote PGP was skipped @@ -246,7 +248,6 @@ func TestVerify(t *testing.T) { // It creates the necessary key to sign the artifact and returns the public key // to verify the signature.
func prepareTestCase(t *testing.T, a artifact.Artifact, version *agtversion.ParsedSemVer, cfg *artifact.Config) []byte { - filename, err := artifact.GetArtifactName(a, *version, cfg.OperatingSystem, cfg.Architecture) require.NoErrorf(t, err, "could not get artifact name") diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go index 0a3decbab87..84eb82dec84 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go @@ -55,7 +55,28 @@ func getTestCases() []testCase { } } -func getElasticCoServer(t *testing.T) (*httptest.Server, []byte) { +type extResCode map[string]struct { + resCode int + count int +} + +type testDials struct { + extResCode +} + +func (td *testDials) withExtResCode(k string, statusCode int, count int) { + td.extResCode[k] = struct { + resCode int + count int + }{statusCode, count} +} + +func (td *testDials) reset() { + *td = testDials{extResCode: make(extResCode)} +} + +func getElasticCoServer(t *testing.T) (*httptest.Server, []byte, *testDials) { + td := testDials{extResCode: make(extResCode)} correctValues := map[string]struct{}{ fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i386.deb"): {}, fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "amd64.deb"): {}, @@ -81,7 +102,6 @@ func getElasticCoServer(t *testing.T) (*httptest.Server, []byte) { ext = ".tar.gz" } packageName = strings.TrimSuffix(packageName, ext) - switch ext { case ".sha512": resp = []byte(fmt.Sprintf("%x %s", hash, packageName)) @@ -103,11 +123,17 @@ func getElasticCoServer(t *testing.T) (*httptest.Server, []byte) { return } + if v, ok := td.extResCode[ext]; ok && v.count != 0 { + w.WriteHeader(v.resCode) + v.count-- + td.extResCode[ext] = v + } + _, err := w.Write(resp) assert.NoErrorf(t, err, "mock elastic.co server: fails writing response") }) - return httptest.NewServer(handler), pub + return httptest.NewServer(handler), pub, &td } func getElasticCoClient(server *httptest.Server) http.Client { diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go index 1d59da4e977..8cf21a86818 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go @@ -44,7 +44,7 @@ func TestDownload(t *testing.T) { log, _ := logger.New("", false) timeout := 30 * time.Second testCases := getTestCases() - server, _ := getElasticCoServer(t) + server, _, _ := getElasticCoServer(t) elasticClient := getElasticCoClient(server) config := &artifact.Config{ @@ -359,7 +359,6 @@ type downloadHttpResponse struct { } func TestDownloadVersion(t *testing.T) { - type fields struct { config *artifact.Config } @@ -485,7 +484,6 @@ func TestDownloadVersion(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - targetDirPath := t.TempDir() handleDownload := func(rw http.ResponseWriter, req *http.Request) { @@ -527,5 +525,4 @@ func TestDownloadVersion(t *testing.T) { assert.Equalf(t, filepath.Join(targetDirPath, tt.want), got, "Download(%v, %v)", tt.args.a, tt.args.version) }) } - } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go index
4657f92659a..e0abbcc97c6 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go @@ -53,6 +53,9 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte) (*Veri httpcommon.WithModRoundtripper(func(rt http.RoundTripper) http.RoundTripper { return download.WithHeaders(rt, download.Headers) }), + httpcommon.WithModRoundtripper(func(rt http.RoundTripper) http.RoundTripper { + return WithBackoff(rt, log) + }), ) if err != nil { return nil, err @@ -88,7 +91,7 @@ func (v *Verifier) Reload(c *artifact.Config) error { // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. -func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { +func (v *Verifier) Verify(ctx context.Context, a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { artifactPath, err := artifact.GetArtifactPath(a, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory) if err != nil { return errors.New(err, "retrieving package path") @@ -98,7 +101,7 @@ func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, return fmt.Errorf("failed to verify SHA512 hash: %w", err) } - if err = v.verifyAsc(a, version, skipDefaultPgp, pgpBytes...); err != nil { + if err = v.verifyAsc(ctx, a, version, skipDefaultPgp, pgpBytes...); err != nil { var invalidSignatureErr *download.InvalidSignatureError if errors.As(err, &invalidSignatureErr) { if err := os.Remove(artifactPath); err != nil { @@ -116,7 +119,7 @@ func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, return nil } -func (v *Verifier) verifyAsc(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultKey bool, pgpSources ...string) error { +func (v *Verifier) verifyAsc(ctx context.Context, a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultKey bool, pgpSources ...string) error { filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch()) if err != nil { return errors.New(err, "retrieving package name") @@ -132,7 +135,7 @@ func (v *Verifier) verifyAsc(a artifact.Artifact, version agtversion.ParsedSemVe return errors.New(err, "composing URI for fetching asc file", errors.TypeNetwork) } - ascBytes, err := v.getPublicAsc(ascURI) + ascBytes, err := v.getPublicAsc(ctx, ascURI) if err != nil { return errors.New(err, fmt.Sprintf("fetching asc file from %s", ascURI), errors.TypeNetwork, errors.M(errors.MetaKeyURI, ascURI)) } @@ -163,8 +166,8 @@ func (v *Verifier) composeURI(filename, artifactName string) (string, error) { return uri.String(), nil } -func (v *Verifier) getPublicAsc(sourceURI string) ([]byte, error) { - ctx, cancelFn := context.WithTimeout(context.Background(), 30*time.Second) +func (v *Verifier) getPublicAsc(ctx context.Context, sourceURI string) ([]byte, error) { + ctx, cancelFn := context.WithTimeout(ctx, 30*time.Second) defer cancelFn() req, err := http.NewRequestWithContext(ctx, http.MethodGet, sourceURI, nil) if err != nil { diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go index e477db3e227..2923a4d3845 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go +++ 
b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go @@ -9,6 +9,7 @@ import ( "fmt" "math/rand/v2" "net/http" + "net/http/httptest" "net/url" "os" @@ -30,7 +31,7 @@ func TestVerify(t *testing.T) { log, _ := logger.New("", false) timeout := 30 * time.Second testCases := getRandomTestCases()[0:1] - server, pub := getElasticCoServer(t) + server, pub, td := getElasticCoServer(t) config := &artifact.Config{ SourceURI: server.URL + "/downloads", @@ -41,7 +42,7 @@ func TestVerify(t *testing.T) { } t.Run("without proxy", func(t *testing.T) { - runTests(t, testCases, config, log, pub) + runTests(t, testCases, td, config, log, pub) }) t.Run("with proxy", func(t *testing.T) { @@ -72,14 +73,21 @@ func TestVerify(t *testing.T) { URL: (*httpcommon.ProxyURI)(proxyURL), } - runTests(t, testCases, &config, log, pub) + runTests(t, testCases, td, &config, log, pub) }) } -func runTests(t *testing.T, testCases []testCase, config *artifact.Config, log *logger.Logger, pub []byte) { +func runTests(t *testing.T, testCases []testCase, td *testDials, config *artifact.Config, log *logger.Logger, pub []byte) { for _, tc := range testCases { testName := fmt.Sprintf("%s-binary-%s", tc.system, tc.arch) t.Run(testName, func(t *testing.T) { + td.withExtResCode(".asc", 500, 2) + defer td.reset() + + cancelDeadline := time.Now().Add(config.Timeout) + cancelCtx, cancel := context.WithDeadline(context.Background(), cancelDeadline) + defer cancel() + config.OperatingSystem = tc.system config.Architecture = tc.arch @@ -88,7 +96,7 @@ func runTests(t *testing.T, testCases []testCase, config *artifact.Config, log * downloader, err := NewDownloader(log, config, upgradeDetails) require.NoError(t, err, "could not create new downloader") - pkgPath, err := downloader.Download(context.Background(), beatSpec, version) + pkgPath, err := downloader.Download(cancelCtx, beatSpec, version) require.NoErrorf(t, err, "failed downloading %s v%s", beatSpec.Artifact, version) @@ -102,7 +110,7 @@ func runTests(t *testing.T, testCases []testCase, config *artifact.Config, log * t.Fatal(err) } - err = testVerifier.Verify(beatSpec, *version, false) + err = testVerifier.Verify(cancelCtx, beatSpec, *version, false) require.NoError(t, err) }) } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verify_backoff_rtt.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verify_backoff_rtt.go new file mode 100644 index 00000000000..0f90e7ae657 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verify_backoff_rtt.go @@ -0,0 +1,89 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package http + +import ( + "bytes" + "fmt" + "io" + "net/http" + "time" + + "github.com/cenkalti/backoff/v4" + + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +func WithBackoff(rtt http.RoundTripper, logger *logger.Logger) http.RoundTripper { + if rtt == nil { + rtt = http.DefaultTransport + } + + return &BackoffRoundTripper{next: rtt, logger: logger} +} + +type BackoffRoundTripper struct { + next http.RoundTripper + logger *logger.Logger +} + +func (btr *BackoffRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + exp := backoff.NewExponentialBackOff() + boCtx := backoff.WithContext(exp, req.Context()) + + opNotify := func(err error, retryAfter time.Duration) { + btr.logger.Warnf("request failed: %s, retrying in %s", err, retryAfter) + } + + var resp *http.Response + var err error + var resettableBody *bytes.Reader + + if req.Body != nil { + data, err := io.ReadAll(req.Body) + if err != nil { + return nil, fmt.Errorf("failed to read request body: %w", err) + } + req.Body.Close() + + resettableBody = bytes.NewReader(data) + req.Body = io.NopCloser(resettableBody) + } + // opFunc implements the retry logic for the backoff mechanism. + // + // - For each attempt, the request body is reset (if non-nil) to allow reuse. + // - Requests with errors or responses with status >= 400 trigger retries. + // - The response body is closed for failed requests to free resources. + // - A successful request (status < 400) stops the retries and returns the response. + attempt := 0 // incremented before each round trip, so logging starts at attempt 1 + opFunc := func() error { + if resettableBody != nil { + _, err = resettableBody.Seek(0, io.SeekStart) + if err != nil { + btr.logger.Errorf("error while resetting request body: %v", err) + } + } + + attempt++ + resp, err = btr.next.RoundTrip(req) //nolint:bodyclose // the response body is closed when status code >= 400 or it is closed by the caller + if err != nil { + btr.logger.Errorf("attempt %d: round-trip error: %v", attempt, err) + return err + } + + if resp.StatusCode >= 400 { + if err := resp.Body.Close(); err != nil { + btr.logger.Errorf("attempt %d: error closing the response body: %v", attempt, err) + } + btr.logger.Errorf("attempt %d: received response status: %d", attempt, resp.StatusCode) + return errors.New(fmt.Sprintf("received response status: %d", resp.StatusCode)) + } + + return nil + } + + return resp, backoff.RetryNotify(opFunc, boCtx, opNotify) } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verify_backoff_rtt_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verify_backoff_rtt_test.go new file mode 100644 index 00000000000..436911ceceb --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verify_backoff_rtt_test.go @@ -0,0 +1,83 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0.
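For context, a minimal sketch of how the round tripper above composes with a plain *http.Client, mirroring what NewVerifier now does through httpcommon.WithModRoundtripper; the logger name is illustrative, not from this patch:

	log, _ := logger.New("verifier", false)
	client := &http.Client{
		// Retries transport errors and responses with status >= 400 with
		// exponential backoff, until the request's context is done.
		Transport: WithBackoff(http.DefaultTransport, log),
		Timeout:   30 * time.Second,
	}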
+ +package http + +import ( + "bytes" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-libs/logp" +) + +func TestVerifyBackoffRoundtripper(t *testing.T) { + t.Run("test get request retry", func(t *testing.T) { + failedResCounter := 2 + handler := func(rw http.ResponseWriter, req *http.Request) { + if failedResCounter > 0 { + rw.WriteHeader(http.StatusInternalServerError) + failedResCounter-- + } + _, err := rw.Write([]byte("hello")) + require.NoError(t, err) + } + server := httptest.NewServer(http.HandlerFunc(handler)) + client := http.Client{ + Transport: WithBackoff(&http.Transport{}, logp.NewLogger("testing")), + Timeout: 10 * time.Second, + } + + res, err := client.Get(server.URL) //nolint:noctx // test code + require.NoError(t, err) + defer res.Body.Close() + + body, err := io.ReadAll(res.Body) + require.NoError(t, err) + + require.Equal(t, string(body), "hello") + require.Equal(t, res.StatusCode, 200) + require.Equal(t, failedResCounter, 0) + }) + + t.Run("test post request with body", func(t *testing.T) { + failedResCounter := 2 + handler := func(rw http.ResponseWriter, req *http.Request) { + if failedResCounter > 0 { + rw.WriteHeader(http.StatusInternalServerError) + failedResCounter-- + } + + body, err := io.ReadAll(req.Body) + require.NoError(t, err) + defer req.Body.Close() + + _, err = rw.Write(body) + require.NoError(t, err) + } + server := httptest.NewServer(http.HandlerFunc(handler)) + client := http.Client{ + Transport: WithBackoff(&http.Transport{}, logp.NewLogger("testing")), + Timeout: 10 * time.Second, + } + + reqReader := bytes.NewReader([]byte("hello")) + + resp, err := client.Post(server.URL, "text/html", reqReader) //nolint:noctx // test code + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, string(body), "hello") + require.Equal(t, resp.StatusCode, 200) + require.Equal(t, failedResCounter, 0) + }) +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go index 9910e0e06da..7ce05055d60 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go @@ -30,7 +30,6 @@ func (v *Verifier) Name() string { // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte, versionOverride *agtversion.ParsedSemVer) (download.Verifier, error) { - client, err := config.HTTPTransportSettings.Client(httpcommon.WithAPMHTTPInstrumentation()) if err != nil { return nil, err @@ -54,9 +53,9 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte, versio } // Verify checks the package from configured source. -func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { +func (v *Verifier) Verify(ctx context.Context, a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { strippedVersion := agtversion.NewParsedSemVer(version.Major(), version.Minor(), version.Patch(), version.Prerelease(), "") - return v.verifier.Verify(a, *strippedVersion, skipDefaultPgp, pgpBytes...) 
+ return v.verifier.Verify(ctx, a, *strippedVersion, skipDefaultPgp, pgpBytes...) } func (v *Verifier) Reload(c *artifact.Config) error { diff --git a/internal/pkg/agent/application/upgrade/artifact/download/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go index 3c2cf06715c..67d16076f4e 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go @@ -84,7 +84,7 @@ type Verifier interface { // If the checksum does not match Verify returns a *download.ChecksumMismatchError. // If the PGP signature check fails then Verify returns a // *download.InvalidSignatureError. - Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error + Verify(ctx context.Context, a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error } // VerifySHA512HashWithCleanup calls VerifySHA512Hash and, in case of a @@ -210,7 +210,8 @@ func readChecksumFile(checksumFile, filename string) (string, error) { } func VerifyPGPSignatureWithKeys( - log infoWarnLogger, file string, asciiArmorSignature []byte, publicKeys [][]byte) error { + log infoWarnLogger, file string, asciiArmorSignature []byte, publicKeys [][]byte, +) error { var err error for i, key := range publicKeys { err = VerifyPGPSignature(file, asciiArmorSignature, key) diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index fb38e93972c..58d56c81f52 100644 --- a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -116,7 +116,7 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, parsedVersion *agtversi } } - if err := verifier.Verify(agentArtifact, *parsedVersion, skipDefaultPgp, pgpBytes...); err != nil { + if err := verifier.Verify(ctx, agentArtifact, *parsedVersion, skipDefaultPgp, pgpBytes...); err != nil { return "", errors.New(err, "failed verification of agent binary") } return path, nil diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 2bd3b52f727..f08e753964d 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -20,7 +20,6 @@ import ( "github.com/schollz/progressbar/v3" "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" @@ -48,6 +47,13 @@ var ( fleetAuditWaitMax = time.Second * 10 ) +// agentInfo is a custom type that implements the fleetapi.AgentInfo interface +type agentInfo string + +func (a *agentInfo) AgentID() string { + return string(*a) +} + // Uninstall uninstalls the persistently installed Elastic Agent from the system.
func Uninstall(ctx context.Context, cfgFile, topPath, uninstallToken string, log *logp.Logger, pt *progressbar.ProgressBar) error { cwd, err := os.Getwd() @@ -59,6 +65,49 @@ func Uninstall(ctx context.Context, cfgFile, topPath, uninstallToken string, log return fmt.Errorf("uninstall must be run from outside the installed path '%s'", topPath) } + // check if the agent was installed using --unprivileged by checking the file vault for the agent secret (needed on darwin to correctly load the vault) + unprivileged, err := checkForUnprivilegedVault(ctx) + if err != nil { + return fmt.Errorf("error checking for unprivileged vault: %w", err) + } + + // will only notify fleet of the uninstall command if it can gather config and agentinfo, and is not a stand-alone install + localFleet := false + notifyFleet := false + var agentID agentInfo + var cfg *configuration.Configuration + func() { // check if we need to notify in a func to allow us to return early if a (non-fatal) error is encountered. + // read local config + c, err := operations.LoadFullAgentConfig(ctx, log, cfgFile, false, unprivileged) + if err != nil { + pt.Describe("notify Fleet failed: unable to read config") + return + } + cfg, err = configuration.NewFromConfig(c) + if err != nil { + pt.Describe("notify Fleet failed: error transforming config") + return + } + + if cfg != nil && !configuration.IsStandalone(cfg.Fleet) { + agentID = agentInfo(cfg.Settings.ID) + notifyFleet = true + if cfg.Fleet != nil && cfg.Fleet.Server != nil { + localFleet = true + } + } + }() + + // Notify fleet-server while it is still running if it's running locally + if notifyFleet && localFleet { + // host is set in the agent/cmd/enroll_cmd.go by createFleetServerBootstrapConfig + // hosts is set in agent/application/actions/handlers/handler_action_policy_change.go by updateFleetConfig + // agents running the fleet-server integration should communicate over the internal API (defaults to localhost:8221) + // This may need to be fixed with https://github.com/elastic/elastic-agent/issues/4771 + cfg.Fleet.Client.Hosts = []string{cfg.Fleet.Client.Host} + notifyFleetAuditUninstall(ctx, log, pt, cfg, &agentID) //nolint:errcheck // ignore the error as we can't act on it + } + // ensure service is stopped status, err := EnsureStoppedService(topPath, pt) if err != nil { @@ -71,12 +120,6 @@ func Uninstall(ctx context.Context, cfgFile, topPath, uninstallToken string, log return fmt.Errorf("failed trying to kill any running watcher: %w", err) } - // check if the agent was installed using --unprivileged by checking the file vault for the agent secret (needed on darwin to correctly load the vault) - unprivileged, err := checkForUnprivilegedVault(ctx) - if err != nil { - return fmt.Errorf("error checking for unprivileged vault: %w", err) - } - // Uninstall components first if err := uninstallComponents(ctx, cfgFile, uninstallToken, log, pt, unprivileged); err != nil { // If service status was running it was stopped to uninstall the components. 
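The Fleet notification above only needs an AgentID() string accessor, which is why the string-backed agentInfo type is sufficient. A hedged sketch of the relationship (the ID value is hypothetical; the interface is exported as fleetapi.AgentInfo in the checkin_cmd.go hunk later in this patch):

	var id agentInfo = "9fba6b..." // hypothetical agent ID, taken from cfg.Settings.ID
	var _ fleetapi.AgentInfo = &id // *agentInfo satisfies the exported interface
	// Unlike the removed info.NewAgentInfo path, building this value cannot
	// fail partway through an uninstall.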
@@ -111,27 +154,6 @@ func Uninstall(ctx context.Context, cfgFile, topPath, uninstallToken string, log } } - // will only notify fleet of the uninstall command if it can gather config and agentinfo, and is not a stand-alone install - notifyFleet := false - var ai *info.AgentInfo - c, err := operations.LoadFullAgentConfig(ctx, log, cfgFile, false, unprivileged) - if err != nil { - pt.Describe(fmt.Sprintf("unable to read agent config to determine if notifying Fleet is needed: %v", err)) - } - cfg, err := configuration.NewFromConfig(c) - if err != nil { - pt.Describe(fmt.Sprintf("notify Fleet: unable to transform *config.Config to *configuration.Configuration: %v", err)) - } - - if cfg != nil && !configuration.IsStandalone(cfg.Fleet) { - ai, err = info.NewAgentInfo(ctx, false) - if err != nil { - pt.Describe(fmt.Sprintf("unable to read agent info, Fleet will not be notified of uninstall: %v", err)) - } else { - notifyFleet = true - } - } - // remove existing directory pt.Describe("Removing install directory") err = RemovePath(topPath) @@ -144,8 +166,8 @@ func Uninstall(ctx context.Context, cfgFile, topPath, uninstallToken string, log } pt.Describe("Removed install directory") - if notifyFleet { - notifyFleetAuditUninstall(ctx, log, pt, cfg, ai) //nolint:errcheck // ignore the error as we can't act on it + if notifyFleet && !localFleet { + notifyFleetAuditUninstall(ctx, log, pt, cfg, &agentID) //nolint:errcheck // ignore the error as we can't act on it } return nil @@ -154,7 +176,7 @@ func Uninstall(ctx context.Context, cfgFile, topPath, uninstallToken string, log // notifyFleetAuditUninstall will attempt to notify fleet-server of the agent's uninstall. // // There are retries for the attempt after a 10s wait, but it is a best-effort approach. -func notifyFleetAuditUninstall(ctx context.Context, log *logp.Logger, pt *progressbar.ProgressBar, cfg *configuration.Configuration, ai *info.AgentInfo) error { +func notifyFleetAuditUninstall(ctx context.Context, log *logp.Logger, pt *progressbar.ProgressBar, cfg *configuration.Configuration, ai fleetapi.AgentInfo) error { ctx, cancel := context.WithCancel(ctx) defer cancel() pt.Describe("Attempting to notify Fleet of uninstall") diff --git a/internal/pkg/agent/install/uninstall_test.go b/internal/pkg/agent/install/uninstall_test.go index c07c4e3ade6..19b5afdee95 100644 --- a/internal/pkg/agent/install/uninstall_test.go +++ b/internal/pkg/agent/install/uninstall_test.go @@ -22,7 +22,6 @@ import ( "go.uber.org/zap" "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" @@ -178,7 +177,7 @@ func TestNotifyFleetAuditUnenroll(t *testing.T) { log, _ := logp.NewInMemory("test", zap.NewDevelopmentEncoderConfig()) pt := progressbar.NewOptions(-1, progressbar.OptionSetWriter(io.Discard)) - ai := &info.AgentInfo{} + var agentID agentInfo = "testID" for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -194,7 +193,7 @@ func TestNotifyFleetAuditUnenroll(t *testing.T) { }, }, } - err := notifyFleetAuditUninstall(context.Background(), log, pt, cfg, ai) + err := notifyFleetAuditUninstall(context.Background(), log, pt, cfg, &agentID) if tc.err == nil { assert.NoError(t, err) } else { @@ -222,7 +221,7 @@ func TestNotifyFleetAuditUnenroll(t *testing.T) { }, }, } - err := 
notifyFleetAuditUninstall(context.Background(), log, pt, cfg, ai) + err := notifyFleetAuditUninstall(context.Background(), log, pt, cfg, &agentID) assert.EqualError(t, err, "notify Fleet: failed") }) diff --git a/internal/pkg/fleetapi/ack_cmd.go b/internal/pkg/fleetapi/ack_cmd.go index 72abcfd6373..1feb8e8ee30 100644 --- a/internal/pkg/fleetapi/ack_cmd.go +++ b/internal/pkg/fleetapi/ack_cmd.go @@ -23,7 +23,7 @@ const ackPath = "/api/fleet/agents/%s/acks" type AckEvent struct { EventType string `json:"type"` // 'STATE' | 'ERROR' | 'ACTION_RESULT' | 'ACTION' SubType string `json:"subtype"` // 'RUNNING','STARTING','IN_PROGRESS','CONFIG','FAILED','STOPPING','STOPPED','DATA_DUMP','ACKNOWLEDGED','UNKNOWN'; - Timestamp string `json:"timestamp"` // : '2019-01-05T14:32:03.36764-05:00', + Timestamp string `json:"timestamp"` // : '2019-01-05T14:32:03.36764-05:00' ActionID string `json:"action_id"` // : '48cebde1-c906-4893-b89f-595d943b72a2', AgentID string `json:"agent_id"` // : 'agent1', Message string `json:"message,omitempty"` // : 'hello2', @@ -84,11 +84,11 @@ func (e *AckResponse) Validate() error { // AckCmd is a fleet API command. type AckCmd struct { client client.Sender - info agentInfo + info AgentInfo } // NewAckCmd creates a new api command. -func NewAckCmd(info agentInfo, client client.Sender) *AckCmd { +func NewAckCmd(info AgentInfo, client client.Sender) *AckCmd { return &AckCmd{ client: client, info: info, diff --git a/internal/pkg/fleetapi/audit_unenroll_cmd.go b/internal/pkg/fleetapi/audit_unenroll_cmd.go index 841dc13b3af..fd1378a37d5 100644 --- a/internal/pkg/fleetapi/audit_unenroll_cmd.go +++ b/internal/pkg/fleetapi/audit_unenroll_cmd.go @@ -58,10 +58,10 @@ func (e *AuditUnenrollRequest) Validate() error { type AuditUnenrollCmd struct { client client.Sender - info agentInfo + info AgentInfo } -func NewAuditUnenrollCmd(info agentInfo, client client.Sender) *AuditUnenrollCmd { +func NewAuditUnenrollCmd(info AgentInfo, client client.Sender) *AuditUnenrollCmd { return &AuditUnenrollCmd{ client: client, info: info, diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go index 6b420ed8ade..16ce9afe671 100644 --- a/internal/pkg/fleetapi/checkin_cmd.go +++ b/internal/pkg/fleetapi/checkin_cmd.go @@ -85,15 +85,15 @@ func (e *CheckinResponse) Validate() error { // CheckinCmd is a fleet API command. type CheckinCmd struct { client client.Sender - info agentInfo + info AgentInfo } -type agentInfo interface { +type AgentInfo interface { AgentID() string } // NewCheckinCmd creates a new api command. -func NewCheckinCmd(info agentInfo, client client.Sender) *CheckinCmd { +func NewCheckinCmd(info AgentInfo, client client.Sender) *CheckinCmd { return &CheckinCmd{ client: client, info: info, diff --git a/magefile.go b/magefile.go index d2a26565946..1e3839d1255 100644 --- a/magefile.go +++ b/magefile.go @@ -114,25 +114,6 @@ var ( goIntegTestTimeout = 2 * time.Hour // goProvisionAndTestTimeout is the timeout used for both provisioning and running tests. 
goProvisionAndTestTimeout = goIntegTestTimeout + 30*time.Minute - - helmChartsValues = []struct { - path string - versionKeys []string - tagKeys []string - }{ - // elastic-agent Helm Chart - { - helmChartPath, - []string{"agent", "version"}, - []string{"agent", "image", "tag"}, - }, - // edot-collector values file for kube-stack Helm Chart - { - helmOtelChartPath, - []string{"defaultCRConfig", "image", "tag"}, - nil, - }, - } ) func init() { @@ -3423,6 +3404,9 @@ type otelDependencies struct { type Helm mg.Namespace +// RenderExamples runs the equivalent of `helm template` and `helm lint` +// for the examples of the Elastic Helm chart, which are located in the +// `deploy/helm/elastic-agent/examples` directory. func (Helm) RenderExamples() error { settings := cli.New() // Helm CLI settings actionConfig := &action.Configuration{} @@ -3503,71 +3487,43 @@ func (Helm) RenderExamples() error { return nil } +// UpdateAgentVersion updates the agent version in the Elastic-Agent and EDOT-Collector Helm charts. func (Helm) UpdateAgentVersion() error { - for _, chart := range helmChartsValues { - valuesFile := filepath.Join(chart.path, "values.yaml") - - data, err := os.ReadFile(valuesFile) - if err != nil { - return fmt.Errorf("failed to read file: %w", err) - } - - isTagged, err := devtools.TagContainsCommit() - if err != nil { - return fmt.Errorf("failed to check if tag contains commit: %w", err) - } - - if !isTagged { - isTagged = os.Getenv(snapshotEnv) != "" - } - - agentVersion := getVersion() - - // Parse YAML into a Node structure because - // it maintains comments - var rootNode yaml.Node - err = yaml.Unmarshal(data, &rootNode) - if err != nil { - return fmt.Errorf("failed to unmarshal YAML: %w", err) - } - - if rootNode.Kind != yaml.DocumentNode { - return fmt.Errorf("root node is not a document node") - } else if len(rootNode.Content) == 0 { - return fmt.Errorf("root node has no content") - } - - if err := updateYamlNodes(rootNode.Content[0], agentVersion, chart.versionKeys...); err != nil { + agentVersion := bversion.GetParsedAgentPackageVersion().CoreVersion() + agentSnapshotVersion := agentVersion + "-SNAPSHOT" + // until the Helm chart reaches GA, this version keeps the -beta suffix + agentChartVersion := agentVersion + "-beta" + + for yamlFile, keyVals := range map[string][]struct { + key string + value string + }{ + // values file for elastic-agent Helm Chart + filepath.Join(helmChartPath, "values.yaml"): { + {"agent.version", agentVersion}, + // always use the SNAPSHOT version for image tag + // for the chart that resides in the git repo + {"agent.image.tag", agentSnapshotVersion}, + }, + // Chart.yaml for elastic-agent Helm Chart + filepath.Join(helmChartPath, "Chart.yaml"): { + {"appVersion", agentVersion}, + {"version", agentChartVersion}, + }, + // edot-collector values file for kube-stack Helm Chart + filepath.Join(helmOtelChartPath, "values.yaml"): { + {"defaultCRConfig.image.tag", agentVersion}, + }, + } { + if err := updateYamlFile(yamlFile, keyVals...); err != nil { return fmt.Errorf("failed to update agent version: %w", err) } - - if !isTagged && len(chart.tagKeys) > 0 { - if err := updateYamlNodes(rootNode.Content[0], fmt.Sprintf("%s-SNAPSHOT", agentVersion), chart.tagKeys...); err != nil { - return fmt.Errorf("failed to update agent image tag: %w", err) - } - } - - // Truncate values file - file, err := os.Create(valuesFile) - if err != nil { - return fmt.Errorf("failed to open file for writing: %w", err) - } - defer file.Close() - - // Create a YAML encoder with 2-space
indentation - encoder := yaml.NewEncoder(file) - encoder.SetIndent(2) - - // Encode the updated YAML node back to the file - err = encoder.Encode(&rootNode) - if err != nil { - return fmt.Errorf("failed to encode updated YAML: %w", err) - } } return nil } +// Lint lints the Elastic-Agent Helm chart. func (Helm) Lint() error { settings := cli.New() // Helm CLI settings actionConfig := &action.Configuration{} @@ -3586,6 +3542,54 @@ func (Helm) Lint() error { return nil } +func updateYamlFile(path string, keyVal ...struct { + key string + value string +}) error { + data, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read file: %w", err) + } + + // Parse YAML into a Node structure because + // it maintains comments + var rootNode yaml.Node + err = yaml.Unmarshal(data, &rootNode) + if err != nil { + return fmt.Errorf("failed to unmarshal YAML: %w", err) + } + + if rootNode.Kind != yaml.DocumentNode { + return fmt.Errorf("root node is not a document node") + } else if len(rootNode.Content) == 0 { + return fmt.Errorf("root node has no content") + } + + for _, kv := range keyVal { + if err := updateYamlNodes(rootNode.Content[0], kv.value, strings.Split(kv.key, ".")...); err != nil { + return fmt.Errorf("failed to update agent version: %w", err) + } + } + + // Truncate values file + file, err := os.Create(path) + if err != nil { + return fmt.Errorf("failed to open file for writing: %w", err) + } + defer file.Close() + + // Create a YAML encoder with 2-space indentation + encoder := yaml.NewEncoder(file) + encoder.SetIndent(2) + + // Encode the updated YAML node back to the file + err = encoder.Encode(&rootNode) + if err != nil { + return fmt.Errorf("failed to encode updated YAML: %w", err) + } + return nil +} + func updateYamlNodes(rootNode *yaml.Node, value string, keys ...string) error { if len(keys) == 0 { return fmt.Errorf("no keys provided") diff --git a/pkg/component/component.go b/pkg/component/component.go index 06a9820d8ec..21e87ea5233 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -5,6 +5,7 @@ package component import ( + "encoding/json" "errors" "fmt" "sort" @@ -173,6 +174,39 @@ func (c Component) MarshalYAML() (interface{}, error) { return c, nil } +func (c *Component) MarshalJSON() ([]byte, error) { + marshalableComponent := struct { + ID string `json:"ID"` + InputType string `json:"InputType"` + OutputType string `json:"OutputType"` + ErrMsg string `json:"ErrMsg,omitempty"` + Units []struct { + ID string `json:"ID"` + ErrMsg string `json:"ErrMsg,omitempty"` + } `json:"Units"` + }{ + ID: c.ID, + InputType: c.InputType, + OutputType: c.OutputType, + } + if c.Err != nil { + marshalableComponent.ErrMsg = c.Err.Error() + } + for i := range c.Units { + marshalableComponent.Units = append(marshalableComponent.Units, struct { + ID string `json:"ID"` + ErrMsg string `json:"ErrMsg,omitempty"` + }{ + ID: c.Units[i].ID, + }) + if c.Units[i].Err != nil { + marshalableComponent.Units[i].ErrMsg = c.Units[i].Err.Error() + } + } + + return json.Marshal(marshalableComponent) +} + // Type returns the type of the component. 
func (c *Component) Type() string { if c.InputSpec != nil { diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go index 8ddbf65f931..995b223d91d 100644 --- a/pkg/component/component_test.go +++ b/pkg/component/component_test.go @@ -59,6 +59,32 @@ func TestComponentMarshalError(t *testing.T) { require.Contains(t, string(outData), "test error value") } +func TestMarshalJSON(t *testing.T) { + testComponent := Component{ + ID: "test-device", + Err: testErr{data: "test error value"}, + Units: []Unit{ + { + ID: "test-unit", + Config: &proto.UnitExpectedConfig{ + Source: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "api_key": structpb.NewStringValue("test-api-key"), + }, + }, + }, + }, + }, + } + + marshaledBytes, err := testComponent.MarshalJSON() + require.NoError(t, err) + marshaledJsonString := string(marshaledBytes) + assert.Contains(t, marshaledJsonString, "test error value") + assert.Contains(t, marshaledJsonString, "test-unit") + assert.NotContains(t, marshaledJsonString, "test-api-key") +} + func TestToComponents(t *testing.T) { linuxAMD64Platform := PlatformDetail{ Platform: Platform{ diff --git a/testing/integration/fleetserver_test.go b/testing/integration/fleetserver_test.go index 51d4561b403..2121abd76fa 100644 --- a/testing/integration/fleetserver_test.go +++ b/testing/integration/fleetserver_test.go @@ -47,17 +47,9 @@ func TestInstallFleetServerBootstrap(t *testing.T) { Local: false, }) - t.Skip("Skip until the first 8.16.0-SNAPSHOT is available") - ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() - // Get path to Elastic Agent executable - fixture, err := define.NewFixtureFromLocalBuild(t, define.Version(), atesting.WithAdditionalArgs([]string{"-E", "output.elasticsearch.allow_older_versions=true"})) - require.NoError(t, err) - err = fixture.Prepare(ctx) - require.NoError(t, err) - t.Log("Ensure base path is clean") var defaultBasePath string switch runtime.GOOS { @@ -70,7 +62,7 @@ func TestInstallFleetServerBootstrap(t *testing.T) { } topPath := filepath.Join(defaultBasePath, "Elastic", "Agent") - err = os.RemoveAll(topPath) + err := os.RemoveAll(topPath) require.NoError(t, err, "failed to remove %q. The test requires this path not to exist.") t.Log("Create fleet-server policy...") @@ -105,64 +97,124 @@ func TestInstallFleetServerBootstrap(t *testing.T) { t.Logf("fleet-server will enroll with es host: %q", esHost) - // Run `elastic-agent install` with fleet-server bootstrap options. - // We use `--force` to prevent interactive execution. - opts := &atesting.InstallOpts{ - Force: true, - Privileged: true, - FleetBootstrapOpts: atesting.FleetBootstrapOpts{ - ESHost: esHost, - ServiceToken: serviceToken, - Policy: policy.ID, - Port: 8220, - }, - } - out, err := fixture.Install(ctx, opts) - if err != nil { - t.Logf("Install output: %s", out) - require.NoError(t, err, "unable to install elastic-agent with fleet-server bootstrap options") - } + t.Run("privileged", func(t *testing.T) { + // Get path to Elastic Agent executable + fixture, err := define.NewFixtureFromLocalBuild(t, define.Version(), atesting.WithAdditionalArgs([]string{"-E", "output.elasticsearch.allow_older_versions=true"})) + require.NoError(t, err) + err = fixture.Prepare(ctx) + require.NoError(t, err) + + // Run `elastic-agent install` with fleet-server bootstrap options. + // We use `--force` to prevent interactive execution. 
+ opts := &atesting.InstallOpts{ + Force: true, + Privileged: true, + FleetBootstrapOpts: atesting.FleetBootstrapOpts{ + ESHost: esHost, + ServiceToken: serviceToken, + Policy: policy.ID, + Port: 8220, + }, + } + out, err := fixture.Install(ctx, opts) + if err != nil { + t.Logf("Install output: %s", out) + require.NoError(t, err, "unable to install elastic-agent with fleet-server bootstrap options") + } - // checkInstallSuccess(t, fixture, topPath, true) // FIXME fails to build if this is uncommented, but the method is part of install_test.go - t.Run("check agent package version", testAgentPackageVersion(ctx, fixture, true)) + // checkInstallSuccess(t, fixture, topPath, true) // FIXME fails to build if this is uncommented, but the method is part of install_test.go + t.Run("check agent package version", testAgentPackageVersion(ctx, fixture, true)) + t.Run("check fleet-server api", testFleetServerInternalAPI()) + + // Make sure uninstall from within the topPath fails on Windows + if runtime.GOOS == "windows" { + cwd, err := os.Getwd() + require.NoErrorf(t, err, "GetWd failed: %s", err) + err = os.Chdir(topPath) + require.NoErrorf(t, err, "Chdir to topPath failed: %s", err) + t.Cleanup(func() { + _ = os.Chdir(cwd) + }) + out, err = fixture.Uninstall(ctx, &atesting.UninstallOpts{Force: true}) + require.Error(t, err, "uninstall should have failed") + require.Containsf(t, string(out), "uninstall must be run from outside the installed path", "expected error string not found in: %s err: %s", out, err) + } - // elastic-agent will self sign a cert to use with fleet-server if one is not passed - // in order to interact with the API we need to ignore the cert. - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - }, - } - fleetOK := false - for i := 0; i < 10; i++ { - t.Log("Checking fleet-server status") - resp, err := client.Get("https://localhost:8220/api/status") + t.Run("Test audit/unenroll", testUninstallAuditUnenroll(ctx, fixture, info)) + }) + t.Run("unprivileged", func(t *testing.T) { + // Get path to Elastic Agent executable + fixture, err := define.NewFixtureFromLocalBuild(t, define.Version(), atesting.WithAdditionalArgs([]string{"-E", "output.elasticsearch.allow_older_versions=true"})) + require.NoError(t, err) + err = fixture.Prepare(ctx) + require.NoError(t, err) + + // Run `elastic-agent install` with fleet-server bootstrap options. + // We use `--force` to prevent interactive execution. 
+ opts := &atesting.InstallOpts{ + Force: true, + Privileged: false, + FleetBootstrapOpts: atesting.FleetBootstrapOpts{ + ESHost: esHost, + ServiceToken: serviceToken, + Policy: policy.ID, + Port: 8220, + }, + } + out, err := fixture.Install(ctx, opts) if err != nil { - t.Logf("fleet-server status check returned error: %v, retry in 10s...", err) + t.Logf("Install output: %s", out) + require.NoError(t, err, "unable to install elastic-agent with fleet-server bootstrap options") + } + + // checkInstallSuccess(t, fixture, topPath, true) // FIXME fails to build if this is uncommented, but the method is part of install_test.go + t.Run("check agent package version", testAgentPackageVersion(ctx, fixture, true)) + t.Run("check fleet-server api", testFleetServerInternalAPI()) + + // Make sure uninstall from within the topPath fails on Windows + if runtime.GOOS == "windows" { + cwd, err := os.Getwd() + require.NoErrorf(t, err, "GetWd failed: %s", err) + err = os.Chdir(topPath) + require.NoErrorf(t, err, "Chdir to topPath failed: %s", err) + t.Cleanup(func() { + _ = os.Chdir(cwd) + }) + out, err = fixture.Uninstall(ctx, &atesting.UninstallOpts{Force: true}) + require.Error(t, err, "uninstall should have failed") + require.Containsf(t, string(out), "uninstall must be run from outside the installed path", "expected error string not found in: %s err: %s", out, err) + } + + t.Run("Test audit/unenroll", testUninstallAuditUnenroll(ctx, fixture, info)) + }) +} + +func testFleetServerInternalAPI() func(t *testing.T) { + return func(t *testing.T) { + // elastic-agent will self sign a cert to use with fleet-server if one is not passed + // in order to interact with the API we need to ignore the cert. + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + } + fleetOK := false + for i := 0; i < 10; i++ { + t.Log("Checking fleet-server status") + resp, err := client.Get("https://localhost:8220/api/status") + if err != nil { + t.Logf("fleet-server status check returned error: %v, retry in 10s...", err) + time.Sleep(10 * time.Second) + continue + } + if resp.StatusCode == http.StatusOK { + fleetOK = true + break + } + t.Logf("fleet-server status check returned incorrect status: %d, retry in 10s", resp.StatusCode) time.Sleep(10 * time.Second) continue } - if resp.StatusCode == http.StatusOK { - fleetOK = true - break - } - t.Logf("fleet-server status check returned incorrect status: %d, retry in 10s", resp.StatusCode) - time.Sleep(10 * time.Second) - continue - } - require.True(t, fleetOK, "expected fleet-server /api/status to return 200") - - // Make sure uninstall from within the topPath fails on Windows - if runtime.GOOS == "windows" { - cwd, err := os.Getwd() - require.NoErrorf(t, err, "GetWd failed: %s", err) - err = os.Chdir(topPath) - require.NoErrorf(t, err, "Chdir to topPath failed: %s", err) - t.Cleanup(func() { - _ = os.Chdir(cwd) - }) - out, err = fixture.Uninstall(ctx, &atesting.UninstallOpts{Force: true}) - require.Error(t, err, "uninstall should have failed") - require.Containsf(t, string(out), "uninstall must be run from outside the installed path", "expected error string not found in: %s err: %s", out, err) + require.True(t, fleetOK, "expected fleet-server /api/status to return 200") } } diff --git a/testing/integration/install_test.go b/testing/integration/install_test.go index 31e5214da72..5abe3272388 100644 --- a/testing/integration/install_test.go +++ b/testing/integration/install_test.go @@ -362,9 +362,6 @@ func 
TestInstallUninstallAudit(t *testing.T) { ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() - fixture, err := define.NewFixtureFromLocalBuild(t, define.Version()) - require.NoError(t, err) - policyResp, enrollmentTokenResp := createPolicyAndEnrollmentToken(ctx, t, info.KibanaClient, createBasicPolicy()) t.Logf("Created policy %+v", policyResp.AgentPolicy) @@ -372,53 +369,97 @@ func TestInstallUninstallAudit(t *testing.T) { fleetServerURL, err := fleettools.DefaultURL(ctx, info.KibanaClient) require.NoError(t, err, "failed getting Fleet Server URL") - err = fixture.Prepare(ctx) - require.NoError(t, err) - // Run `elastic-agent install`. We use `--force` to prevent interactive - // execution. - opts := &atesting.InstallOpts{ - Force: true, - EnrollOpts: atesting.EnrollOpts{ - URL: fleetServerURL, - EnrollmentToken: enrollmentTokenResp.APIKey, - }, - } - out, err := fixture.Install(ctx, opts) - if err != nil { - t.Logf("install output: %s", out) + t.Run("privileged", func(t *testing.T) { + fixture, err := define.NewFixtureFromLocalBuild(t, define.Version()) require.NoError(t, err) - } - require.Eventuallyf(t, func() bool { - return waitForAgentAndFleetHealthy(ctx, t, fixture) - }, time.Minute, time.Second, "agent never became healthy or connected to Fleet") + err = fixture.Prepare(ctx) + require.NoError(t, err) + // Run `elastic-agent install`. We use `--force` to prevent interactive + // execution. + opts := &atesting.InstallOpts{ + Force: true, + Privileged: true, + EnrollOpts: atesting.EnrollOpts{ + URL: fleetServerURL, + EnrollmentToken: enrollmentTokenResp.APIKey, + }, + } + out, err := fixture.Install(ctx, opts) + if err != nil { + t.Logf("install output: %s", out) + require.NoError(t, err) + } - agentID, err := getAgentID(ctx, fixture) - require.NoError(t, err, "error getting the agent inspect output") - require.NotEmpty(t, agentID, "agent ID empty") + require.Eventuallyf(t, func() bool { + return waitForAgentAndFleetHealthy(ctx, t, fixture) + }, time.Minute, time.Second, "agent never became healthy or connected to Fleet") - out, err = fixture.Uninstall(ctx, &atesting.UninstallOpts{Force: true}) - if err != nil { - t.Logf("uninstall output: %s", out) + t.Run("run uninstall", testUninstallAuditUnenroll(ctx, fixture, info)) + }) + + t.Run("unprivileged", func(t *testing.T) { + fixture, err := define.NewFixtureFromLocalBuild(t, define.Version()) require.NoError(t, err) - } - // TODO: replace direct query to ES index with API call to Fleet - // Blocked on https://github.com/elastic/kibana/issues/194884 - response, err := info.ESClient.Get(".fleet-agents", agentID, info.ESClient.Get.WithContext(ctx)) - require.NoError(t, err) - defer response.Body.Close() - p, err := io.ReadAll(response.Body) - require.NoError(t, err) - require.Equalf(t, http.StatusOK, response.StatusCode, "ES status code expected 200, body: %s", p) - var res struct { - Source struct { - AuditUnenrolledReason string `json:"audit_unenrolled_reason"` - } `json:"_source"` + err = fixture.Prepare(ctx) + require.NoError(t, err) + // Run `elastic-agent install`. We use `--force` to prevent interactive + // execution. 
+ opts := &atesting.InstallOpts{ + Force: true, + Privileged: false, + EnrollOpts: atesting.EnrollOpts{ + URL: fleetServerURL, + EnrollmentToken: enrollmentTokenResp.APIKey, + }, + } + out, err := fixture.Install(ctx, opts) + if err != nil { + t.Logf("install output: %s", out) + require.NoError(t, err) + } + + require.Eventuallyf(t, func() bool { + return waitForAgentAndFleetHealthy(ctx, t, fixture) + }, time.Minute, time.Second, "agent never became healthy or connected to Fleet") + + t.Run("run uninstall", testUninstallAuditUnenroll(ctx, fixture, info)) + }) +} + +func testUninstallAuditUnenroll(ctx context.Context, fixture *atesting.Fixture, info *define.Info) func(t *testing.T) { + return func(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skip Windows as it has been disabled because of https://github.com/elastic/elastic-agent/issues/5952") + } + agentID, err := getAgentID(ctx, fixture) + require.NoError(t, err, "error getting the agent inspect output") + require.NotEmpty(t, agentID, "agent ID empty") + + out, err := fixture.Uninstall(ctx, &atesting.UninstallOpts{Force: true}) + if err != nil { + t.Logf("uninstall output: %s", out) + require.NoError(t, err) + } + + // TODO: replace direct query to ES index with API call to Fleet + // Blocked on https://github.com/elastic/kibana/issues/194884 + response, err := info.ESClient.Get(".fleet-agents", agentID, info.ESClient.Get.WithContext(ctx)) + require.NoError(t, err) + defer response.Body.Close() + p, err := io.ReadAll(response.Body) + require.NoError(t, err) + require.Equalf(t, http.StatusOK, response.StatusCode, "ES status code expected 200, body: %s", p) + var res struct { + Source struct { + AuditUnenrolledReason string `json:"audit_unenrolled_reason"` + } `json:"_source"` + } + err = json.Unmarshal(p, &res) + require.NoError(t, err) + require.Equalf(t, "uninstall", res.Source.AuditUnenrolledReason, "uninstall output: %s", out) } - err = json.Unmarshal(p, &res) - require.NoError(t, err) - require.Equal(t, "uninstall", res.Source.AuditUnenrolledReason) } // TestRepeatedInstallUninstallFleet will install then uninstall the agent diff --git a/testing/integration/kubernetes_agent_service_test.go b/testing/integration/kubernetes_agent_service_test.go index ed479c99702..a66d8547d8a 100644 --- a/testing/integration/kubernetes_agent_service_test.go +++ b/testing/integration/kubernetes_agent_service_test.go @@ -7,17 +7,13 @@ package integration import ( - "bufio" - "bytes" "context" - "fmt" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/e2e-framework/klient/k8s" "github.com/elastic/elastic-agent/pkg/testing/define" @@ -35,73 +31,38 @@ func TestKubernetesAgentService(t *testing.T) { Group: define.Kubernetes, }) + // read the service agent config + serviceAgentYAML, err := os.ReadFile(filepath.Join("testdata", "connectors.agent.yml")) + require.NoError(t, err, "failed to read service agent config") + ctx := context.Background() kCtx := k8sGetContext(t, info) - testNamespace := kCtx.getNamespace(t) - - renderedManifest, err := renderKustomize(agentK8SKustomize) - require.NoError(t, err, "failed to render kustomize") - - k8sObjects, err := k8sYAMLToObjects(bufio.NewReader(bytes.NewReader(renderedManifest))) - require.NoError(t, err, "failed to convert yaml to k8s objects") - // add the testNamespace in the k8sObjects - k8sObjects = append([]k8s.Object{&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}}, 
k8sObjects...) + schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx) + require.NoError(t, err, "error at getting schedulable node count") + require.NotZero(t, schedulableNodeCount, "no schedulable Kubernetes nodes found") - t.Cleanup(func() { - err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sObjects...) - require.NoError(t, err, "failed to delete k8s namespace") - }) - - k8sKustomizeAdjustObjects(k8sObjects, testNamespace, "elastic-agent-standalone", - func(container *corev1.Container) { - // set agent image - container.Image = kCtx.agentImage - // set ImagePullPolicy to "Never" to avoid pulling the image - // as the image is already loaded by the kubernetes provisioner - container.ImagePullPolicy = "Never" - - // set Elasticsearch host and API key - for idx, env := range container.Env { - if env.Name == "ES_HOST" { - container.Env[idx].Value = kCtx.esHost - container.Env[idx].ValueFrom = nil - } - if env.Name == "API_KEY" { - container.Env[idx].Value = kCtx.esAPIKey - container.Env[idx].ValueFrom = nil + testSteps := []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{ + agentContainerMemoryLimit: "800Mi", + }, func(obj k8s.Object) { + // update the configmap to only run the connectors input + switch objWithType := obj.(type) { + case *corev1.ConfigMap: + _, ok := objWithType.Data["agent.yml"] + if ok { + objWithType.Data["agent.yml"] = string(serviceAgentYAML) } } - }, - func(pod *corev1.PodSpec) { - for volumeIdx, volume := range pod.Volumes { - // need to update the volume path of the state directory - // to match the test namespace - if volume.Name == "elastic-agent-state" { - hostPathType := corev1.HostPathDirectoryOrCreate - pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{ - Type: &hostPathType, - Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", testNamespace), - } - } - } - }) - - // update the configmap to only run the connectors input - serviceAgentYAML, err := os.ReadFile(filepath.Join("testdata", "connectors.agent.yml")) - require.NoError(t, err) - for _, obj := range k8sObjects { - switch objWithType := obj.(type) { - case *corev1.ConfigMap: - _, ok := objWithType.Data["agent.yml"] - if ok { - objWithType.Data["agent.yml"] = string(serviceAgentYAML) - } - } + }), + k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", map[string]bool{ + "connectors-py": true, + }), } - k8sKustomizeDeployAgent(t, ctx, kCtx.client, k8sObjects, testNamespace, false, kCtx.logsBasePath, - true, map[string]bool{ - "connectors-py": true, - }) + testNamespace := kCtx.getNamespace(t) + for _, step := range testSteps { + step(t, ctx, kCtx, testNamespace) + } } diff --git a/testing/integration/kubernetes_agent_standalone_test.go b/testing/integration/kubernetes_agent_standalone_test.go index 86db561edd8..148706f3654 100644 --- a/testing/integration/kubernetes_agent_standalone_test.go +++ b/testing/integration/kubernetes_agent_standalone_test.go @@ -77,64 +77,84 @@ func TestKubernetesAgentStandaloneKustomize(t *testing.T) { Group: define.Kubernetes, }) + ctx := context.Background() kCtx := k8sGetContext(t, info) - renderedManifest, err := renderKustomize(agentK8SKustomize) - require.NoError(t, err, "failed to render kustomize") + schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx) + require.NoError(t, err, "error at getting schedulable node count") + require.NotZero(t, 

 	testCases := []struct {
-		name             string
-		runUser          *int64
-		runGroup         *int64
-		capabilitiesDrop []corev1.Capability
-		capabilitiesAdd  []corev1.Capability
-		runK8SInnerTests bool
-		skipReason       string
+		name       string
+		skipReason string
+		steps      []k8sTestStep
 	}{
 		{
-			"default deployment - rootful agent",
-			nil,
-			nil,
-			nil,
-			nil,
-			false,
-			"",
+			name: "default deployment - rootful agent",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{
+					agentContainerMemoryLimit: "800Mi",
+				}, nil),
+				k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil),
+			},
 		},
 		{
-			"drop ALL capabilities - rootful agent",
-			int64Ptr(0),
-			nil,
-			[]corev1.Capability{"ALL"},
-			[]corev1.Capability{},
-			false,
-			"",
+			name: "drop ALL capabilities - rootful agent",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{
+					agentContainerRunUser:          int64Ptr(0),
+					agentContainerCapabilitiesAdd:  []corev1.Capability{},
+					agentContainerCapabilitiesDrop: []corev1.Capability{"ALL"},
+					agentContainerMemoryLimit:      "800Mi",
+				}, nil),
+				k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil),
+			},
 		},
 		{
-			"drop ALL add CHOWN, SETPCAP capabilities - rootful agent",
-			int64Ptr(0),
-			nil,
-			[]corev1.Capability{"ALL"},
-			[]corev1.Capability{"CHOWN", "SETPCAP"},
-			true,
-			"",
+			name: "drop ALL add CHOWN, SETPCAP capabilities - rootful agent",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{
+					agentContainerRunUser:          int64Ptr(0),
+					agentContainerCapabilitiesAdd:  []corev1.Capability{"CHOWN", "SETPCAP"},
+					agentContainerCapabilitiesDrop: []corev1.Capability{"ALL"},
+					agentContainerMemoryLimit:      "800Mi",
+				}, nil),
+				k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil),
+				k8sStepRunInnerTests("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone"),
+			},
 		},
 		{
-			"drop ALL add CHOWN, SETPCAP capabilities - rootless agent",
-			int64Ptr(1000), // elastic-agent uid
-			nil,
-			[]corev1.Capability{"ALL"},
-			[]corev1.Capability{"CHOWN", "SETPCAP", "DAC_READ_SEARCH", "SYS_PTRACE"},
-			true,
-			"",
+			name: "drop ALL add CHOWN, SETPCAP, DAC_READ_SEARCH, SYS_PTRACE capabilities - rootless agent",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{
+					agentContainerRunUser:          int64Ptr(1000),
+					agentContainerRunGroup:         int64Ptr(1000),
+					agentContainerCapabilitiesAdd:  []corev1.Capability{"CHOWN", "SETPCAP", "DAC_READ_SEARCH", "SYS_PTRACE"},
+					agentContainerCapabilitiesDrop: []corev1.Capability{"ALL"},
+					agentContainerMemoryLimit:      "800Mi",
+				}, nil),
+				k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil),
+				k8sStepRunInnerTests("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone"),
+			},
 		},
 		{
-			"drop ALL add CHOWN, SETPCAP capabilities - rootless agent random uid:gid",
-			int64Ptr(500),
-			int64Ptr(500),
-			[]corev1.Capability{"ALL"},
-			[]corev1.Capability{"CHOWN", "SETPCAP", "DAC_READ_SEARCH", "SYS_PTRACE"},
-			true,
-			"",
+			name: "drop ALL add CHOWN, SETPCAP, DAC_READ_SEARCH, SYS_PTRACE capabilities - rootless agent random uid:gid",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{
+					agentContainerRunUser:          int64Ptr(500),
+					agentContainerRunGroup:         int64Ptr(500),
+					agentContainerCapabilitiesAdd:  []corev1.Capability{"CHOWN", "SETPCAP", "DAC_READ_SEARCH", "SYS_PTRACE"},
+					agentContainerCapabilitiesDrop: []corev1.Capability{"ALL"},
+					agentContainerMemoryLimit:      "800Mi",
+				}, nil),
+				k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil),
+				k8sStepRunInnerTests("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone"),
+			},
 		},
 	}

@@ -145,72 +165,11 @@ func TestKubernetesAgentStandaloneKustomize(t *testing.T) {
 				t.Skip(tc.skipReason)
 			}

-			ctx := context.Background()
 			testNamespace := kCtx.getNamespace(t)

-			k8sObjects, err := k8sYAMLToObjects(bufio.NewReader(bytes.NewReader(renderedManifest)))
-			require.NoError(t, err, "failed to convert yaml to k8s objects")
-
-			// add the testNamespace in the beginning of k8sObjects to be created first
-			k8sObjects = append([]k8s.Object{&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}}, k8sObjects...)
-
-			t.Cleanup(func() {
-				err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sObjects...)
-				require.NoError(t, err, "failed to delete k8s namespace")
-			})
-
-			k8sKustomizeAdjustObjects(k8sObjects, testNamespace, "elastic-agent-standalone",
-				func(container *corev1.Container) {
-					// set agent image
-					container.Image = kCtx.agentImage
-					// set ImagePullPolicy to "Never" to avoid pulling the image
-					// as the image is already loaded by the kubernetes provisioner
-					container.ImagePullPolicy = "Never"
-
-					container.Resources.Limits = corev1.ResourceList{
-						corev1.ResourceMemory: resource.MustParse("800Mi"),
-					}
-
-					if tc.capabilitiesDrop != nil || tc.capabilitiesAdd != nil || tc.runUser != nil || tc.runGroup != nil {
-						// set security context
-						container.SecurityContext = &corev1.SecurityContext{
-							Capabilities: &corev1.Capabilities{
-								Drop: tc.capabilitiesDrop,
-								Add:  tc.capabilitiesAdd,
-							},
-							RunAsUser:  tc.runUser,
-							RunAsGroup: tc.runGroup,
-						}
-					}
-					// set Elasticsearch host and API key
-					for idx, env := range container.Env {
-						if env.Name == "ES_HOST" {
-							container.Env[idx].Value = kCtx.esHost
-							container.Env[idx].ValueFrom = nil
-						}
-						if env.Name == "API_KEY" {
-							container.Env[idx].Value = kCtx.esAPIKey
-							container.Env[idx].ValueFrom = nil
-						}
-					}
-				},
-				func(pod *corev1.PodSpec) {
-					for volumeIdx, volume := range pod.Volumes {
-						// need to update the volume path of the state directory
-						// to match the test namespace
-						if volume.Name == "elastic-agent-state" {
-							hostPathType := corev1.HostPathDirectoryOrCreate
-							pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{
-								Type: &hostPathType,
-								Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", testNamespace),
-							}
-						}
-					}
-				})
-
-			k8sKustomizeDeployAgent(t, ctx, kCtx.client, k8sObjects, testNamespace, tc.runK8SInnerTests,
-				kCtx.logsBasePath, true, nil)
+			for _, step := range tc.steps {
+				step(t, ctx, kCtx, testNamespace)
+			}
 		})
 	}
 }
@@ -228,85 +187,47 @@ func TestKubernetesAgentOtel(t *testing.T) {
 		Group: define.Kubernetes,
 	})

+	ctx := context.Background()
 	kCtx := k8sGetContext(t, info)

-	renderedManifest, err := renderKustomize(agentK8SKustomize)
-	require.NoError(t, err, "failed to render kustomize")
+	schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx)
+	require.NoError(t, err, "failed to get schedulable node count")
+	require.NotZero(t, schedulableNodeCount, "no schedulable Kubernetes nodes found")

 	testCases := []struct {
-		name             string
-		envAdd           []corev1.EnvVar
-		runK8SInnerTests bool
+		name       string
+		skipReason string
+		steps      []k8sTestStep
 	}{
 		{
-			"run agent in otel mode",
-			[]corev1.EnvVar{
-				{Name: "ELASTIC_AGENT_OTEL", Value: "true"},
+			name: "run agent in otel mode",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{
+					agentContainerMemoryLimit: "800Mi",
+					agentContainerExtraEnv:    []corev1.EnvVar{{Name: "ELASTIC_AGENT_OTEL", Value: "true"}},
+					agentContainerArgs:        []string{}, // clear default args
+				}, nil),
 			},
-			false,
 		},
 	}

 	for _, tc := range testCases {
 		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
-			ctx := context.Background()
-			testNamespace := kCtx.getNamespace(t)
-
-			k8sObjects, err := k8sYAMLToObjects(bufio.NewReader(bytes.NewReader(renderedManifest)))
-			require.NoError(t, err, "failed to convert yaml to k8s objects")
-
-			// add the testNamespace in the k8sObjects
-			k8sObjects = append([]k8s.Object{&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}}, k8sObjects...)
-
-			t.Cleanup(func() {
-				err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sObjects...)
-				require.NoError(t, err, "failed to delete k8s namespace")
-			})
-
-			k8sKustomizeAdjustObjects(k8sObjects, testNamespace, "elastic-agent-standalone",
-				func(container *corev1.Container) {
-					// set agent image
-					container.Image = kCtx.agentImage
-					// set ImagePullPolicy to "Never" to avoid pulling the image
-					// as the image is already loaded by the kubernetes provisioner
-					container.ImagePullPolicy = "Never"
-
-					// set Elasticsearch host and API key
-					for idx, env := range container.Env {
-						if env.Name == "ES_HOST" {
-							container.Env[idx].Value = kCtx.esHost
-							container.Env[idx].ValueFrom = nil
-						}
-						if env.Name == "API_KEY" {
-							container.Env[idx].Value = kCtx.esAPIKey
-							container.Env[idx].ValueFrom = nil
-						}
-					}
-
-					if len(tc.envAdd) > 0 {
-						container.Env = append(container.Env, tc.envAdd...)
-					}
+			if tc.skipReason != "" {
+				t.Skip(tc.skipReason)
+			}

-					// drop arguments overriding default config
-					container.Args = []string{}
-				},
-				func(pod *corev1.PodSpec) {
-					for volumeIdx, volume := range pod.Volumes {
-						// need to update the volume path of the state directory
-						// to match the test namespace
-						if volume.Name == "elastic-agent-state" {
-							hostPathType := corev1.HostPathDirectoryOrCreate
-							pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{
-								Type: &hostPathType,
-								Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", testNamespace),
-							}
-						}
-					}
-				})
+			testNamespace := kCtx.getNamespace(t)

-			k8sKustomizeDeployAgent(t, ctx, kCtx.client, k8sObjects, testNamespace,
-				false, kCtx.logsBasePath, false, nil)
+			for _, step := range tc.steps {
+				step(t, ctx, kCtx, testNamespace)
+			}
 		})
 	}
 }
@@ -331,282 +252,145 @@ func TestKubernetesAgentHelm(t *testing.T) {
 	err := kCtx.client.Resources().List(ctx, &nodeList)
 	require.NoError(t, err)

-	totalK8SNodes := len(nodeList.Items)
-	require.NotZero(t, totalK8SNodes, "No Kubernetes nodes found")
+	schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx)
+	require.NoError(t, err, "failed to get schedulable node count")
+	require.NotZero(t, schedulableNodeCount, "no schedulable Kubernetes nodes found")

 	testCases := []struct {
-		name                   string
-		values                 map[string]any
-		atLeastAgentPods       int
-		runK8SInnerTests       bool
-		agentPodLabelSelectors []string
+		name       string
+		skipReason string
+		steps      []k8sTestStep
 	}{
 		{
 			name: "helm standalone agent default kubernetes privileged",
-			values: map[string]any{
-				"kubernetes": map[string]any{
-					"enabled": true,
-				},
-				"agent": map[string]any{
-					"unprivileged": false,
-					"image": map[string]any{
-						"repository": kCtx.agentImageRepo,
-						"tag":        kCtx.agentImageTag,
-						"pullPolicy": "Never",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{
+					"kubernetes": map[string]any{
+						"enabled": true,
 					},
-				},
-				"outputs": map[string]any{
-					"default": map[string]any{
-						"type":    "ESPlainAuthAPI",
-						"url":     kCtx.esHost,
-						"api_key": kCtx.esAPIKey,
+					"agent": map[string]any{
+						"unprivileged": false,
+						"image": map[string]any{
+							"repository": kCtx.agentImageRepo,
+							"tag":        kCtx.agentImageTag,
+							"pullPolicy": "Never",
+						},
 					},
-				},
-			},
-			runK8SInnerTests: true,
-			// - perNode Daemonset (totalK8SNodes pods)
-			// - clusterWide Deployment (1 agent pod)
-			// - ksmSharded Statefulset (1 agent pod)
-			atLeastAgentPods: totalK8SNodes + 1 + 1,
-			agentPodLabelSelectors: []string{
-				// name=agent-{preset}-{release}
-				"name=agent-pernode-helm-agent",
-				"name=agent-clusterwide-helm-agent",
-				"name=agent-ksmsharded-helm-agent",
+					"outputs": map[string]any{
+						"default": map[string]any{
+							"type":    "ESPlainAuthAPI",
+							"url":     kCtx.esHost,
+							"api_key": kCtx.esAPIKey,
+						},
+					},
+				}),
+				k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil),
+				k8sStepCheckAgentStatus("name=agent-clusterwide-helm-agent", 1, "agent", nil),
+				k8sStepCheckAgentStatus("name=agent-ksmsharded-helm-agent", 1, "agent", nil),
+				k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
+				k8sStepRunInnerTests("name=agent-clusterwide-helm-agent", 1, "agent"),
+				k8sStepRunInnerTests("name=agent-ksmsharded-helm-agent", 1, "agent"),
 			},
 		},
 		{
 			name: "helm standalone agent default kubernetes unprivileged",
-			values: map[string]any{
-				"kubernetes": map[string]any{
-					"enabled": true,
-				},
-				"agent": map[string]any{
-					"unprivileged": true,
-					"image": map[string]any{
-						"repository": kCtx.agentImageRepo,
-						"tag":        kCtx.agentImageTag,
-						"pullPolicy": "Never",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{
+					"kubernetes": map[string]any{
+						"enabled": true,
 					},
-				},
-				"outputs": map[string]any{
-					"default": map[string]any{
-						"type":    "ESPlainAuthAPI",
-						"url":     kCtx.esHost,
-						"api_key": kCtx.esAPIKey,
+					"agent": map[string]any{
+						"unprivileged": true,
+						"image": map[string]any{
+							"repository": kCtx.agentImageRepo,
+							"tag":        kCtx.agentImageTag,
+							"pullPolicy": "Never",
+						},
 					},
-				},
-			},
-			runK8SInnerTests: true,
-			// - perNode Daemonset (totalK8SNodes pods)
-			// - clusterWide Deployment (1 agent pod)
-			// - ksmSharded Statefulset (1 agent pod)
-			atLeastAgentPods: totalK8SNodes + 1 + 1,
-			agentPodLabelSelectors: []string{
-				// name=agent-{preset}-{release}
-				"name=agent-pernode-helm-agent",
-				"name=agent-clusterwide-helm-agent",
-				"name=agent-ksmsharded-helm-agent",
+					"outputs": map[string]any{
+						"default": map[string]any{
+							"type":    "ESPlainAuthAPI",
+							"url":     kCtx.esHost,
+							"api_key": kCtx.esAPIKey,
+						},
+					},
+				}),
+				k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil),
+				k8sStepCheckAgentStatus("name=agent-clusterwide-helm-agent", 1, "agent", nil),
+				k8sStepCheckAgentStatus("name=agent-ksmsharded-helm-agent", 1, "agent", nil),
+				k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
+				k8sStepRunInnerTests("name=agent-clusterwide-helm-agent", 1, "agent"),
+				k8sStepRunInnerTests("name=agent-ksmsharded-helm-agent", 1, "agent"),
 			},
 		},
 		{
 			name: "helm managed agent default kubernetes privileged",
-			values: map[string]any{
-				"agent": map[string]any{
-					"unprivileged": false,
-					"image": map[string]any{
-						"repository": kCtx.agentImageRepo,
-						"tag":        kCtx.agentImageTag,
-						"pullPolicy": "Never",
-					},
-					"fleet": map[string]any{
-						"enabled": true,
-						"url":     kCtx.enrollParams.FleetURL,
-						"token":   kCtx.enrollParams.EnrollmentToken,
-						"preset":  "perNode",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{
+					"agent": map[string]any{
+						"unprivileged": false,
+						"image": map[string]any{
+							"repository": kCtx.agentImageRepo,
+							"tag":        kCtx.agentImageTag,
+							"pullPolicy": "Never",
+						},
+						"fleet": map[string]any{
+							"enabled": true,
+							"url":     kCtx.enrollParams.FleetURL,
+							"token":   kCtx.enrollParams.EnrollmentToken,
+							"preset":  "perNode",
+						},
 					},
-				},
-			},
-			runK8SInnerTests: true,
-			// - perNode Daemonset (totalK8SNodes pods)
-			atLeastAgentPods: totalK8SNodes,
-			agentPodLabelSelectors: []string{
-				// name=agent-{preset}-{release}
-				"name=agent-pernode-helm-agent",
+				}),
+				k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil),
+				k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
 			},
 		},
 		{
 			name: "helm managed agent default kubernetes unprivileged",
-			values: map[string]any{
-				"agent": map[string]any{
-					"unprivileged": true,
-					"image": map[string]any{
-						"repository": kCtx.agentImageRepo,
-						"tag":        kCtx.agentImageTag,
-						"pullPolicy": "Never",
+			steps: []k8sTestStep{
+				k8sStepCreateNamespace(),
+				k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{
+					"agent": map[string]any{
+						"unprivileged": true,
+						"image": map[string]any{
+							"repository": kCtx.agentImageRepo,
+							"tag":        kCtx.agentImageTag,
+							"pullPolicy": "Never",
+						},
+						"fleet": map[string]any{
+							"enabled": true,
+							"url":     kCtx.enrollParams.FleetURL,
+							"token":   kCtx.enrollParams.EnrollmentToken,
+							"preset":  "perNode",
+						},
 					},
-					"fleet": map[string]any{
-						"enabled": true,
-						"url":     kCtx.enrollParams.FleetURL,
-						"token":   kCtx.enrollParams.EnrollmentToken,
-						"preset":  "perNode",
-					},
-				},
-			},
-			runK8SInnerTests: true,
-			// - perNode Daemonset (totalK8SNodes pods)
-			atLeastAgentPods: totalK8SNodes,
-			agentPodLabelSelectors: []string{
-				// name=agent-{preset}-{release}
-				"name=agent-pernode-helm-agent",
+				}),
+				k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil),
+				k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"),
 			},
 		},
 	}

 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
+			if tc.skipReason != "" {
+				t.Skip(tc.skipReason)
+			}
+
 			ctx := context.Background()
 			testNamespace := kCtx.getNamespace(t)

-			settings := cli.New()
-			settings.SetNamespace(testNamespace)
-			actionConfig := &action.Configuration{}
-
-			helmChart, err := loader.Load(agentK8SHelm)
-			require.NoError(t, err, "failed to load helm chart")
-
-			err = actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), "",
-				func(format string, v ...interface{}) {})
-			require.NoError(t, err, "failed to init helm action config")
-
-			helmValues := tc.values
-
-			k8sNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}
-
-			t.Cleanup(func() {
-				if t.Failed() {
-					if err := k8sDumpAllPodLogs(ctx, kCtx.client, testNamespace, testNamespace, kCtx.logsBasePath); err != nil {
-						t.Logf("failed to dump logs: %s", err)
-					}
-				}
-
-				uninstallAction := action.NewUninstall(actionConfig)
-				uninstallAction.Wait = true
-				_, err = uninstallAction.Run("helm-agent")
-				if err != nil {
-					t.Logf("failed to uninstall helm chart: %s", err)
-				}
-
-				err = k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sNamespace)
-				if err != nil {
-					t.Logf("failed to delete k8s namespace: %s", err)
-				}
-			})
-
-			installAction := action.NewInstall(actionConfig)
-			installAction.Namespace = testNamespace
-			installAction.CreateNamespace = true
-			installAction.UseReleaseName = true
-			installAction.ReleaseName = "helm-agent"
-			installAction.Timeout = 2 * time.Minute
-			installAction.Wait = true
-			installAction.WaitForJobs = true
-			_, err = installAction.Run(helmChart, helmValues)
-			require.NoError(t, err, "failed to install helm chart")
-
-			healthyAgentPods := 0
-			for _, podSelector := range tc.agentPodLabelSelectors {
-				pods := &corev1.PodList{}
-				err = kCtx.client.Resources(testNamespace).List(ctx, pods, func(opt *metav1.ListOptions) {
-					opt.LabelSelector = podSelector
-				})
-				require.NoError(t, err, "failed to list pods with selector ", podSelector)
-
-				for _, pod := range pods.Items {
-					var stdout, stderr bytes.Buffer
-					err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, testNamespace, pod.Name, "agent", map[string]bool{})
-					if err != nil {
-						t.Errorf("failed to check agent status: %v", err)
-						t.Logf("stdout: %s\n", stdout.String())
-						t.Logf("stderr: %s\n", stderr.String())
-						t.FailNow()
-					}
-					healthyAgentPods++
-
-					if !tc.runK8SInnerTests {
-						continue
-					}
-
-					stdout.Reset()
-					stderr.Reset()
-					err := kCtx.client.Resources().ExecInPod(ctx, testNamespace, pod.Name, "agent",
-						[]string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr)
-					t.Logf("%s k8s-inner-tests output:", pod.Name)
-					t.Log(stdout.String())
-					if err != nil {
-						t.Log(stderr.String())
-					}
-					require.NoError(t, err, "error at k8s inner tests execution")
-				}
+			for _, step := range tc.steps {
+				step(t, ctx, kCtx, testNamespace)
+			}
-			}
-
-			require.GreaterOrEqual(t, healthyAgentPods, tc.atLeastAgentPods,
-				fmt.Sprintf("at least %d agent containers should be checked", tc.atLeastAgentPods))
 		})
 	}
 }

-func k8sKustomizeDeployAgent(t *testing.T, ctx context.Context, client klient.Client, objects []k8s.Object,
-	namespace string, runK8SInnerTests bool, testlogsBasePath string, checkStatus bool, componentPresence map[string]bool,
-) {
-	err := k8sCreateObjects(ctx, client, k8sCreateOpts{namespace: namespace, wait: true}, objects...)
-	require.NoError(t, err, "failed to create k8s objects")
-
-	t.Cleanup(func() {
-		if t.Failed() {
-			if err := k8sDumpAllPodLogs(ctx, client, namespace, namespace, testlogsBasePath); err != nil {
-				t.Logf("failed to dump logs: %s", err)
-			}
-		}
-	})
-
-	pods := &corev1.PodList{}
-	podsLabelSelector := fmt.Sprintf("app=elastic-agent-standalone")
-	err = client.Resources(namespace).List(ctx, pods, func(opt *metav1.ListOptions) {
-		opt.LabelSelector = podsLabelSelector
-	})
-	require.NoError(t, err, "failed to list pods with selector ", podsLabelSelector)
-	require.NotEmpty(t, pods.Items, "no pods found with selector ", podsLabelSelector)
-
-	for _, pod := range pods.Items {
-		var stdout, stderr bytes.Buffer
-
-		if checkStatus {
-			err = k8sCheckAgentStatus(ctx, client, &stdout, &stderr, namespace, pod.Name, "elastic-agent-standalone", componentPresence)
-			if err != nil {
-				t.Errorf("failed to check agent status: %v", err)
-				t.Logf("stdout: %s\n", stdout.String())
-				t.Logf("stderr: %s\n", stderr.String())
-				t.FailNow()
-			}
-		}
-
-		stdout.Reset()
-		stderr.Reset()
-
-		if runK8SInnerTests {
-			err := client.Resources().ExecInPod(ctx, namespace, pod.Name, "elastic-agent-standalone",
-				[]string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr)
-			t.Logf("%s k8s-inner-tests output:", pod.Name)
-			t.Log(stdout.String())
-			if err != nil {
-				t.Log(stderr.String())
-			}
-			require.NoError(t, err, "error at k8s inner tests execution")
-		}
-	}
-}
-
 // k8sCheckAgentStatus checks that the agent reports healthy.
 func k8sCheckAgentStatus(ctx context.Context, client klient.Client, stdout *bytes.Buffer, stderr *bytes.Buffer,
 	namespace string, agentPodName string, containerName string, componentPresence map[string]bool,
@@ -806,8 +590,8 @@ func k8sYAMLToObjects(reader *bufio.Reader) ([]k8s.Object, error) {
 	return objects, nil
 }

-// renderKustomize renders the given kustomize directory to YAML
-func renderKustomize(kustomizePath string) ([]byte, error) {
+// k8sRenderKustomize renders the given kustomize directory to YAML
+func k8sRenderKustomize(kustomizePath string) ([]byte, error) {
 	// Create a file system pointing to the kustomize directory
 	fSys := filesys.MakeFsOnDisk()

@@ -1072,6 +856,38 @@ func (k8sContext) getNamespace(t *testing.T) string {
 	return noSpecialCharsRegexp.ReplaceAllString(testNamespace, "")
 }

+// k8sSchedulableNodeCount returns the number of schedulable nodes in the cluster, i.e.
+// nodes that are not marked unschedulable and carry no NoSchedule taint
+func k8sSchedulableNodeCount(ctx context.Context, kCtx k8sContext) (int, error) {
+	nodeList := corev1.NodeList{}
+	err := kCtx.client.Resources().List(ctx, &nodeList)
+	if err != nil {
+		return 0, err
+	}
+
+	totalSchedulableNodes := 0
+
+	for _, node := range nodeList.Items {
+		if node.Spec.Unschedulable {
+			continue
+		}
+
+		hasNoScheduleTaint := false
+		for _, taint := range node.Spec.Taints {
+			if taint.Effect == corev1.TaintEffectNoSchedule {
+				hasNoScheduleTaint = true
+				break
+			}
+		}
+
+		if hasNoScheduleTaint {
+			continue
+		}
+
+		totalSchedulableNodes++
+	}
+
+	return totalSchedulableNodes, nil
+}
+
 // k8sGetContext performs all the necessary checks to get a k8sContext for the current test
 func k8sGetContext(t *testing.T, info *define.Info) k8sContext {
 	agentImage := os.Getenv("AGENT_IMAGE")
@@ -1118,3 +934,233 @@ func k8sGetContext(t *testing.T, info *define.Info) k8sContext {
 		enrollParams: enrollParams,
 	}
 }
+
+// k8sTestStep is a function that performs a single step in a k8s integration test
+type k8sTestStep func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string)
+
+// k8sStepCreateNamespace creates a namespace for the current test and adds a test cleanup that
+// deletes it
+func k8sStepCreateNamespace() k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		k8sNamespace := &corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: namespace,
+			},
+		}
+
+		t.Cleanup(func() {
+			err := k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, k8sNamespace)
+			if err != nil {
+				t.Logf("failed to delete namespace: %v", err)
+			}
+		})
+
+		err := k8sCreateObjects(ctx, kCtx.client, k8sCreateOpts{wait: true}, k8sNamespace)
+		require.NoError(t, err, "failed to create namespace")
+	}
+}
+
+// k8sKustomizeOverrides is a best-effort attempt to make kustomize somewhat flexible and
+// contains certain handpicked overrides to apply to the k8s objects created from
+// kustomize rendering
+type k8sKustomizeOverrides struct {
+	agentContainerRunUser          *int64
+	agentContainerRunGroup         *int64
+	agentContainerCapabilitiesDrop []corev1.Capability
+	agentContainerCapabilitiesAdd  []corev1.Capability
+	agentContainerExtraEnv         []corev1.EnvVar
+	agentContainerArgs             []string
+	agentContainerMemoryLimit      string
+}
+
+// k8sStepDeployKustomize renders a kustomize manifest and deploys it. Also, it tries to
+// adjust the k8s objects created from the rendering to match the needs of the current test
+// with k8sKustomizeOverrides. However, this is not as flexible as we would like it to be.
+// As a last resort, the forEachObject callback can be used to further adjust the k8s objects.
+func k8sStepDeployKustomize(kustomizePath string, containerName string, overrides k8sKustomizeOverrides, forEachObject func(object k8s.Object)) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		renderedManifest, err := k8sRenderKustomize(kustomizePath)
+		require.NoError(t, err, "failed to render kustomize")
+
+		objects, err := k8sYAMLToObjects(bufio.NewReader(bytes.NewReader(renderedManifest)))
+		require.NoError(t, err, "failed to parse rendered kustomize")
+
+		if forEachObject != nil {
+			for _, object := range objects {
+				forEachObject(object)
+			}
+		}
+
+		k8sKustomizeAdjustObjects(objects, namespace, containerName,
+			func(container *corev1.Container) {
+				// set agent image
+				container.Image = kCtx.agentImage
+				// set ImagePullPolicy to "Never" to avoid pulling the image
+				// as the image is already loaded by the kubernetes provisioner
+				container.ImagePullPolicy = "Never"
+
+				if overrides.agentContainerMemoryLimit != "" {
+					container.Resources.Limits = corev1.ResourceList{
+						corev1.ResourceMemory: resource.MustParse(overrides.agentContainerMemoryLimit),
+					}
+				}
+
+				// if security context overrides are set then set the security context
+				if overrides.agentContainerCapabilitiesDrop != nil || overrides.agentContainerCapabilitiesAdd != nil ||
+					overrides.agentContainerRunUser != nil || overrides.agentContainerRunGroup != nil {
+					container.SecurityContext = &corev1.SecurityContext{
+						Capabilities: &corev1.Capabilities{
+							Drop: overrides.agentContainerCapabilitiesDrop,
+							Add:  overrides.agentContainerCapabilitiesAdd,
+						},
+						RunAsUser:  overrides.agentContainerRunUser,
+						RunAsGroup: overrides.agentContainerRunGroup,
+					}
+				}
+
+				// set Elasticsearch host and API key
+				for idx, env := range container.Env {
+					if env.Name == "ES_HOST" {
+						container.Env[idx].Value = kCtx.esHost
+						container.Env[idx].ValueFrom = nil
+					}
+					if env.Name == "API_KEY" {
+						container.Env[idx].Value = kCtx.esAPIKey
+						container.Env[idx].ValueFrom = nil
+					}
+				}
+
+				if len(overrides.agentContainerExtraEnv) > 0 {
+					container.Env = append(container.Env, overrides.agentContainerExtraEnv...)
+				}
+
+				if overrides.agentContainerArgs != nil {
+					// override the container arguments, e.g. with an empty
+					// slice to drop arguments overriding the default config
+					container.Args = overrides.agentContainerArgs
+				}
+			},
+			func(pod *corev1.PodSpec) {
+				for volumeIdx, volume := range pod.Volumes {
+					// need to update the volume path of the state directory
+					// to match the test namespace
+					if volume.Name == "elastic-agent-state" {
+						hostPathType := corev1.HostPathDirectoryOrCreate
+						pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{
+							Type: &hostPathType,
+							Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", namespace),
+						}
+					}
+				}
+			})
+
+		t.Cleanup(func() {
+			if t.Failed() {
+				if err := k8sDumpAllPodLogs(ctx, kCtx.client, namespace, namespace, kCtx.logsBasePath); err != nil {
+					t.Logf("failed to dump logs: %v", err)
+				}
+			}
+
+			err := k8sDeleteObjects(ctx, kCtx.client, k8sDeleteOpts{wait: true}, objects...)
+			if err != nil {
+				t.Logf("failed to delete objects: %v", err)
+			}
+		})
+
+		err = k8sCreateObjects(ctx, kCtx.client, k8sCreateOpts{wait: true}, objects...)
+		require.NoError(t, err, "failed to create objects")
+	}
+}
+
+// k8sStepCheckAgentStatus checks the status of the agent inside the pods returned by the selector
+func k8sStepCheckAgentStatus(agentPodLabelSelector string, expectedPodNumber int, containerName string, componentPresence map[string]bool) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		podList := &corev1.PodList{}
+		err := kCtx.client.Resources(namespace).List(ctx, podList, func(opt *metav1.ListOptions) {
+			opt.LabelSelector = agentPodLabelSelector
+		})
+		require.NoError(t, err, "failed to list pods with selector %s", agentPodLabelSelector)
+		require.NotEmpty(t, podList.Items, "no pods found with selector %s", agentPodLabelSelector)
+		require.Equal(t, expectedPodNumber, len(podList.Items), "unexpected number of pods found with selector %s", agentPodLabelSelector)
+
+		for _, pod := range podList.Items {
+			var stdout, stderr bytes.Buffer
+			err = k8sCheckAgentStatus(ctx, kCtx.client, &stdout, &stderr, namespace, pod.Name, containerName, componentPresence)
+			if err != nil {
+				t.Errorf("failed to check agent status %s: %v", pod.Name, err)
+				t.Logf("stdout: %s\n", stdout.String())
+				t.Logf("stderr: %s\n", stderr.String())
+				t.FailNow()
+			}
+		}
+	}
+}
+
+// k8sStepRunInnerTests invokes the k8s inner tests inside the pods returned by the selector. Note that
+// this step requires the agent image to be built with the testing framework, as that is when the binary
+// for the inner tests is copied into the image.
+func k8sStepRunInnerTests(agentPodLabelSelector string, expectedPodNumber int, containerName string) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		podList := &corev1.PodList{}
+		err := kCtx.client.Resources(namespace).List(ctx, podList, func(opt *metav1.ListOptions) {
+			opt.LabelSelector = agentPodLabelSelector
+		})
+		require.NoError(t, err, "failed to list pods with selector %s", agentPodLabelSelector)
+		require.NotEmpty(t, podList.Items, "no pods found with selector %s", agentPodLabelSelector)
+		require.Equal(t, expectedPodNumber, len(podList.Items), "unexpected number of pods found with selector %s", agentPodLabelSelector)
+
+		for _, pod := range podList.Items {
+			var stdout, stderr bytes.Buffer
+			err = kCtx.client.Resources().ExecInPod(ctx, namespace, pod.Name, containerName,
+				[]string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr)
+			t.Logf("%s k8s-inner-tests output:", pod.Name)
+			t.Log(stdout.String())
+			if err != nil {
+				t.Log(stderr.String())
+			}
+			require.NoError(t, err, "failed to run k8s inner tests")
+		}
+	}
+}
+
+// k8sStepHelmDeploy deploys a helm chart with the given values under the given release name
+func k8sStepHelmDeploy(chartPath string, releaseName string, values map[string]any) k8sTestStep {
+	return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) {
+		settings := cli.New()
+		settings.SetNamespace(namespace)
+		actionConfig := &action.Configuration{}
+
+		helmChart, err := loader.Load(chartPath)
+		require.NoError(t, err, "failed to load helm chart")
+
+		err = actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), "",
+			func(format string, v ...interface{}) {})
+		require.NoError(t, err, "failed to init helm action config")
+
+		t.Cleanup(func() {
+			if t.Failed() {
+				if err := k8sDumpAllPodLogs(ctx, kCtx.client, namespace, namespace, kCtx.logsBasePath); err != nil {
+					t.Logf("failed to dump logs: %v", err)
+				}
+			}
+
+			uninstallAction := action.NewUninstall(actionConfig)
+			uninstallAction.Wait = true
+			_, err = uninstallAction.Run(releaseName)
+			if err != nil {
+				t.Logf("failed to uninstall helm chart: %v", err)
+			}
+		})
+
+		installAction := action.NewInstall(actionConfig)
+		installAction.Namespace = namespace
+		installAction.CreateNamespace = true
+		installAction.UseReleaseName = true
+		installAction.ReleaseName = releaseName
+		installAction.Timeout = 2 * time.Minute
+		installAction.Wait = true
+		installAction.WaitForJobs = true
+		_, err = installAction.Run(helmChart, values)
+		require.NoError(t, err, "failed to install helm chart")
+	}
+}
diff --git a/testing/integration/testdata/.upgrade-test-agent-versions.yml b/testing/integration/testdata/.upgrade-test-agent-versions.yml
index 3dfe1d52d60..8d77c207fd8 100644
--- a/testing/integration/testdata/.upgrade-test-agent-versions.yml
+++ b/testing/integration/testdata/.upgrade-test-agent-versions.yml
@@ -6,7 +6,7 @@
 testVersions:
   - 8.18.0-SNAPSHOT
+  - 8.17.1-SNAPSHOT
   - 8.17.0
-  - 8.17.0-SNAPSHOT
   - 8.16.1
   - 7.17.27-SNAPSHOT
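Reviewer note: the sketch below is illustrative only and not part of the patch. It shows how the k8sTestStep helpers introduced in this change compose into a complete test; the test name and the define.Require requirements are assumptions mirroring the pattern of the existing Kubernetes tests in this diff.

// TestKubernetesAgentExample is a hypothetical test demonstrating the step
// helpers added in this change; it is not part of the patch itself.
func TestKubernetesAgentExample(t *testing.T) {
	// Assumed requirements, copied from the pattern of the tests above.
	info := define.Require(t, define.Requirements{
		Stack: &define.Stack{},
		Local: false,
		Sudo:  false,
		OS: []define.OS{
			{Type: define.Kubernetes},
		},
		Group: define.Kubernetes,
	})

	ctx := context.Background()
	kCtx := k8sGetContext(t, info)

	schedulableNodeCount, err := k8sSchedulableNodeCount(ctx, kCtx)
	require.NoError(t, err, "failed to get schedulable node count")
	require.NotZero(t, schedulableNodeCount, "no schedulable Kubernetes nodes found")

	// Each step is self-contained: k8sStepCreateNamespace registers its own
	// cleanup, and k8sStepDeployKustomize registers log dumping plus object
	// deletion, so the test body reduces to running the steps in order.
	steps := []k8sTestStep{
		k8sStepCreateNamespace(),
		k8sStepDeployKustomize(agentK8SKustomize, "elastic-agent-standalone", k8sKustomizeOverrides{
			agentContainerMemoryLimit: "800Mi",
		}, nil),
		k8sStepCheckAgentStatus("app=elastic-agent-standalone", schedulableNodeCount, "elastic-agent-standalone", nil),
	}

	testNamespace := kCtx.getNamespace(t)
	for _, step := range steps {
		step(t, ctx, kCtx, testNamespace)
	}
}

Passing (t, ctx, kCtx, namespace) to every step keeps the steps stateless and freely composable, which is what lets the kustomize, otel, and helm tests above share the same table-driven shape.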