From c08e8e334bc4ab5b9489a6026021d29b97ec9d53 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 15:21:38 +0000 Subject: [PATCH 1/8] Channgelog for 8.10.3 (#3568) (#3577) (cherry picked from commit b970bdfc4e8777548af3df43add09ee889f1c193) Co-authored-by: Pierre HILBERT --- changelog/8.10.3.asciidoc | 41 +++++++++++++++++++ changelog/8.10.3.yaml | 26 ++++++++++++ .../1694700201-gpg-unreachable-url-fix.yaml | 32 --------------- ...ent-handling-of-air-gapped-PGP-checks.yaml | 31 -------------- ...etry-strategy-when-uninstalling-agent.yaml | 32 --------------- 5 files changed, 67 insertions(+), 95 deletions(-) create mode 100644 changelog/8.10.3.asciidoc create mode 100644 changelog/8.10.3.yaml delete mode 100644 changelog/fragments/1694700201-gpg-unreachable-url-fix.yaml delete mode 100644 changelog/fragments/1695035111-Resilient-handling-of-air-gapped-PGP-checks.yaml delete mode 100644 changelog/fragments/1695050880-Improve-retry-strategy-when-uninstalling-agent.yaml diff --git a/changelog/8.10.3.asciidoc b/changelog/8.10.3.asciidoc new file mode 100644 index 00000000000..ba6be27f7fc --- /dev/null +++ b/changelog/8.10.3.asciidoc @@ -0,0 +1,41 @@ +// begin 8.10.3 relnotes + +[[release-notes-8.10.3]] +== 8.10.3 + +Review important information about the 8.10.3 release. + + + + + + + + + +[discrete] +[[new-features-8.10.3]] +=== New features + +The 8.10.3 release adds the following new and notable features. + + +elastic-agent:: + +* Improve Agent Uninstall On Windows By Adding Delay Between Retries When File Removal Is Blocked By Busy Files. {elastic-agent-pull}https://github.com/elastic/elastic-agent/pull/3431[#https://github.com/elastic/elastic-agent/pull/3431] {elastic-agent-issue}https://github.com/elastic/elastic-agent/issues/3221[#https://github.com/elastic/elastic-agent/issues/3221] + + + + + + +[discrete] +[[bug-fixes-8.10.3]] +=== Bug fixes + + +elastic-agent:: + +* Resilient Handling Of Air Gapped Pgp Checks. 
{elastic-agent-pull}https://github.com/elastic/elastic-agent/pull/3427[#https://github.com/elastic/elastic-agent/pull/3427] {elastic-agent-issue}https://github.com/elastic/elastic-agent/issues/3368[#https://github.com/elastic/elastic-agent/issues/3368] + +// end 8.10.3 relnotes diff --git a/changelog/8.10.3.yaml b/changelog/8.10.3.yaml new file mode 100644 index 00000000000..6273a300a63 --- /dev/null +++ b/changelog/8.10.3.yaml @@ -0,0 +1,26 @@ +version: 8.10.3 +entries: + - kind: bug-fix + summary: Resilient handling of air gapped PGP checks + description: Elastic Agent should not fail when remote PGP is specified (or official Elastic fallback PGP used) and remote is not available + component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3427 + issue: + - https://github.com/elastic/elastic-agent/issues/3368 + timestamp: 1695035111 + file: + name: 1695035111-Resilient-handling-of-air-gapped-PGP-checks.yaml + checksum: 8741bfa9475a09d5901dc3fab0fed3a06b55d5bb + - kind: feature + summary: Improve Agent uninstall on Windows by adding delay between retries when file removal is blocked by busy files + description: "" + component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3431 + issue: + - https://github.com/elastic/elastic-agent/issues/3221 + timestamp: 1695050880 + file: + name: 1695050880-Improve-retry-strategy-when-uninstalling-agent.yaml + checksum: 45eab228dfd89392a0f3685a628f73ccce05d081 diff --git a/changelog/fragments/1694700201-gpg-unreachable-url-fix.yaml b/changelog/fragments/1694700201-gpg-unreachable-url-fix.yaml deleted file mode 100644 index 42c8945cb91..00000000000 --- a/changelog/fragments/1694700201-gpg-unreachable-url-fix.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Kind can be one of: -# - breaking-change: a change to previously-documented behavior -# - deprecation: functionality that is being removed in a later release -# - bug-fix: fixes a problem in a previous version -# - enhancement: extends functionality but does not break or fix existing behavior -# - feature: new functionality -# - known-issue: problems that we are aware of in a given version -# - security: impacts on the security of a product or a user’s deployment. -# - upgrade: important information for someone upgrading from a prior version -# - other: does not fit into any of the other categories -kind: bug-fix - -# Change summary; a 80ish characters long description of the change. -summary: Fix gpg verification, if one is successful upgrade should continue. - -# Long description; in case the summary is not enough to describe the change -# this field accommodate a description without length limits. -# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. -#description: - -# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. -component: elastic-agent - -# PR URL; optional; the PR number that added the changeset. -# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. -# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. -# Please provide it if you are adding a fragment for a different PR. -pr: https://github.com/elastic/elastic-agent/pull/3426 - -# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). 
-# If not present is automatically filled by the tooling with the issue linked to the PR number. -issue: https://github.com/elastic/elastic-agent/issues/3368 diff --git a/changelog/fragments/1695035111-Resilient-handling-of-air-gapped-PGP-checks.yaml b/changelog/fragments/1695035111-Resilient-handling-of-air-gapped-PGP-checks.yaml deleted file mode 100644 index caaa8a2f53a..00000000000 --- a/changelog/fragments/1695035111-Resilient-handling-of-air-gapped-PGP-checks.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Kind can be one of: -# - breaking-change: a change to previously-documented behavior -# - deprecation: functionality that is being removed in a later release -# - bug-fix: fixes a problem in a previous version -# - enhancement: extends functionality but does not break or fix existing behavior -# - feature: new functionality -# - known-issue: problems that we are aware of in a given version -# - security: impacts on the security of a product or a user’s deployment. -# - upgrade: important information for someone upgrading from a prior version -# - other: does not fit into any of the other categories -kind: bug-fix - -# Change summary; a 80ish characters long description of the change. -summary: Resilient handling of air gapped PGP checks - -# Long description; in case the summary is not enough to describe the change -# this field accommodate a description without length limits. -description: Elastic Agent should not fail when remote PGP is specified (or official Elastic fallback PGP used) and remote is not available - -# Affected component; a word indicating the component this changeset affects. -component: elastic-agent - -# PR number; optional; the PR number that added the changeset. -# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. -# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. -# Please provide it if you are adding a fragment for a different PR. -pr: 3427 - -# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). -# If not present is automatically filled by the tooling with the issue linked to the PR number. -issue: 3368 diff --git a/changelog/fragments/1695050880-Improve-retry-strategy-when-uninstalling-agent.yaml b/changelog/fragments/1695050880-Improve-retry-strategy-when-uninstalling-agent.yaml deleted file mode 100644 index b3c1e7ac5e7..00000000000 --- a/changelog/fragments/1695050880-Improve-retry-strategy-when-uninstalling-agent.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Kind can be one of: -# - breaking-change: a change to previously-documented behavior -# - deprecation: functionality that is being removed in a later release -# - bug-fix: fixes a problem in a previous version -# - enhancement: extends functionality but does not break or fix existing behavior -# - feature: new functionality -# - known-issue: problems that we are aware of in a given version -# - security: impacts on the security of a product or a user’s deployment. -# - upgrade: important information for someone upgrading from a prior version -# - other: does not fit into any of the other categories -kind: feature - -# Change summary; a 80ish characters long description of the change. -summary: Improve uninstall by adding some pause between retries when removal is blocked by busy files - -# Long description; in case the summary is not enough to describe the change -# this field accommodate a description without length limits. 
-# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. -#description: - -# Affected component; a word indicating the component this changeset affects. -component: elastic-agent - -# PR URL; optional; the PR number that added the changeset. -# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. -# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. -# Please provide it if you are adding a fragment for a different PR. -#pr: https://github.com/owner/repo/1234 - -# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). -# If not present is automatically filled by the tooling with the issue linked to the PR number. -#issue: https://github.com/owner/repo/1234 From 069b0658c5006cadff9eb6940783ed4a10308e1c Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Fri, 13 Oct 2023 14:28:38 -0700 Subject: [PATCH 2/8] Add warning messages to magefile for version mismatches in beats (#3580) * add warning messages to magefile * change to hard error * fix mage build target * Update magefile.go Co-authored-by: Shaunak Kashyap * Update magefile.go Co-authored-by: Shaunak Kashyap * Update magefile.go Co-authored-by: Shaunak Kashyap --------- Co-authored-by: Shaunak Kashyap --- magefile.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/magefile.go b/magefile.go index 183889b3e69..bf7b335cbe8 100644 --- a/magefile.go +++ b/magefile.go @@ -979,10 +979,10 @@ func packageAgent(platforms []string, packagingFn func()) { panic(err) } - packagesMissing := false packagesCopied := 0 if !requiredPackagesPresent(pwd, b, packageVersion, requiredPackages) { + fmt.Printf("--- Package %s\n", pwd) cmd := exec.Command("mage", "package") cmd.Dir = pwd cmd.Stdout = os.Stdout @@ -1008,6 +1008,15 @@ func packageAgent(platforms []string, packagingFn func()) { targetPath := filepath.Join(archivePath, rp) os.MkdirAll(targetPath, 0755) for _, f := range files { + // safety check; if the user has an older version of the beats repo, + // for example right after a release where you've `git pulled` from on repo and not the other, + // they might end up with a mishmash of packages from different versions. + // check to see if we have mismatched versions. + if !strings.Contains(f, packageVersion) { + // if this panic hits weird edge cases where we don't want actual failures, revert to a printf statement. + panic(fmt.Sprintf("the file %s doesn't match agent version %s, beats repo might be out of date", f, packageVersion)) + } + targetFile := filepath.Join(targetPath, filepath.Base(f)) packagesCopied += 1 if err := sh.Copy(targetFile, f); err != nil { @@ -1017,8 +1026,8 @@ func packageAgent(platforms []string, packagingFn func()) { } // a very basic footcannon protector; if packages are missing and we need to rebuild them, check to see if those files were copied // if we needed to repackage beats but still somehow copied nothing, could indicate an issue. Usually due to beats and agent being at different versions. - if packagesMissing && packagesCopied == 0 { - fmt.Printf(">>> WARNING: no packages were copied, but we repackaged beats anyway. Check binary to see if intended beats are there.") + if packagesCopied == 0 { + fmt.Println(">>> WARNING: no packages were copied, but we repackaged beats anyway. 
Check binary to see if intended beats are there.") } } } @@ -1153,6 +1162,7 @@ func copyComponentSpecs(componentName, versionedDropPath string) (string, error) targetPath := filepath.Join(versionedDropPath, specFileName) if _, err := os.Stat(targetPath); err != nil { + fmt.Printf(">> File %s does not exist, reverting to local specfile\n", targetPath) // spec not present copy from local sourceSpecFile := filepath.Join("specs", specFileName) if mg.Verbose() { From be04fba8e3797607dcf5f7eaa238050a711d99a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Paolo=20Chil=C3=A0?= Date: Mon, 16 Oct 2023 08:48:43 +0200 Subject: [PATCH 3/8] ESS Staging integ test improvements (#3547) * support tags on integration tests ESS deployments --- .../create_deployment_csp_configuration.yaml | 15 + .../ess/create_deployment_request.tmpl.json | 102 +++++++ pkg/testing/ess/deployment.go | 289 ++++-------------- pkg/testing/ess/provisioner.go | 28 +- 4 files changed, 206 insertions(+), 228 deletions(-) create mode 100644 pkg/testing/ess/create_deployment_csp_configuration.yaml create mode 100644 pkg/testing/ess/create_deployment_request.tmpl.json diff --git a/pkg/testing/ess/create_deployment_csp_configuration.yaml b/pkg/testing/ess/create_deployment_csp_configuration.yaml new file mode 100644 index 00000000000..199f664a65a --- /dev/null +++ b/pkg/testing/ess/create_deployment_csp_configuration.yaml @@ -0,0 +1,15 @@ +gcp: + integrations_server_conf_id: "gcp.integrationsserver.n2.68x32x45.2" + elasticsearch_conf_id: "gcp.es.datahot.n2.68x10x45" + elasticsearch_deployment_template_id: "gcp-storage-optimized-v5" + kibana_instance_configuration_id: "gcp.kibana.n2.68x32x45" +azure: + integrations_server_conf_id: "azure.integrationsserver.fsv2.2" + elasticsearch_conf_id: "azure.es.datahot.edsv4" + elasticsearch_deployment_template_id: "azure-storage-optimized-v2" + kibana_instance_configuration_id: "azure.kibana.fsv2" +aws: + integrations_server_conf_id: "aws.integrationsserver.c5d.2.1" + elasticsearch_conf_id: "aws.es.datahot.i3.1.1" + elasticsearch_deployment_template_id: "aws-storage-optimized-v5" + kibana_instance_configuration_id: "aws.kibana.c5d.1.1" \ No newline at end of file diff --git a/pkg/testing/ess/create_deployment_request.tmpl.json b/pkg/testing/ess/create_deployment_request.tmpl.json new file mode 100644 index 00000000000..3ef93868708 --- /dev/null +++ b/pkg/testing/ess/create_deployment_request.tmpl.json @@ -0,0 +1,102 @@ +{ + "resources": { + "integrations_server": [ + { + "elasticsearch_cluster_ref_id": "main-elasticsearch", + "region": "{{ .request.Region }}", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "{{ .integrations_server_conf_id }}", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "integrations_server": { + "version": "{{ .request.Version }}" + } + }, + "ref_id": "main-integrations_server" + } + ], + "elasticsearch": [ + { + "region": "{{ .request.Region }}", + "settings": { + "dedicated_masters_threshold": 6 + }, + "plan": { + "cluster_topology": [ + { + "zone_count": 1, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "instance_configuration_id": "{{.elasticsearch_conf_id}}", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + } + ], + "elasticsearch": { + "version": "{{ .request.Version }}", + "enabled_built_in_plugins": [] + }, + "deployment_template": { + "id": 
"{{ .elasticsearch_deployment_template_id }}" + } + }, + "ref_id": "main-elasticsearch" + } + ], + "enterprise_search": [], + "kibana": [ + { + "elasticsearch_cluster_ref_id": "main-elasticsearch", + "region": "{{ .request.Region }}", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "{{.kibana_instance_configuration_id}}", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "kibana": { + "version": "{{ .request.Version }}", + "user_settings_json": { + "xpack.fleet.enableExperimental": ["agentTamperProtectionEnabled"] + } + } + }, + "ref_id": "main-kibana" + } + ] + }, + "settings": { + "autoscaling_enabled": false + }, + "name": "{{ .request.Name }}", + "metadata": { + "system_owned": false, + "tags": {{ json .request.Tags }} + } +} \ No newline at end of file diff --git a/pkg/testing/ess/deployment.go b/pkg/testing/ess/deployment.go index 9d9469036b0..a79f8cb58cb 100644 --- a/pkg/testing/ess/deployment.go +++ b/pkg/testing/ess/deployment.go @@ -7,19 +7,28 @@ package ess import ( "bytes" "context" + _ "embed" "encoding/json" "fmt" - "html/template" "net/http" "net/url" "strings" + "text/template" "time" + + "gopkg.in/yaml.v2" ) +type Tag struct { + Key string `json:"key"` + Value string `json:"value"` +} + type CreateDeploymentRequest struct { Name string `json:"name"` Region string `json:"region"` Version string `json:"version"` + Tags []Tag `json:"tags"` } type CreateDeploymentResponse struct { @@ -85,21 +94,16 @@ type DeploymentStatusResponse struct { // CreateDeployment creates the deployment with the specified configuration. func (c *Client) CreateDeployment(ctx context.Context, req CreateDeploymentRequest) (*CreateDeploymentResponse, error) { - tpl, err := deploymentTemplateFactory(req) + reqBodyBytes, err := generateCreateDeploymentRequestBody(req) if err != nil { return nil, err } - var buf bytes.Buffer - if err := tpl.Execute(&buf, req); err != nil { - return nil, fmt.Errorf("unable to create deployment creation request body: %w", err) - } - createResp, err := c.doPost( ctx, "deployments", "application/json", - &buf, + bytes.NewReader(reqBodyBytes), ) if err != nil { return nil, fmt.Errorf("error calling deployment creation API: %w", err) @@ -308,233 +312,70 @@ func overallStatus(statuses ...DeploymentStatus) DeploymentStatus { return overallStatus } -func deploymentTemplateFactory(req CreateDeploymentRequest) (*template.Template, error) { +//go:embed create_deployment_request.tmpl.json +var createDeploymentRequestTemplate string + +//go:embed create_deployment_csp_configuration.yaml +var cloudProviderSpecificValues []byte + +func generateCreateDeploymentRequestBody(req CreateDeploymentRequest) ([]byte, error) { regionParts := strings.Split(req.Region, "-") if len(regionParts) < 2 { return nil, fmt.Errorf("unable to parse CSP out of region [%s]", req.Region) } csp := regionParts[0] - var tplStr string - switch csp { - case "gcp": - tplStr = createDeploymentRequestTemplateGCP - case "azure": - tplStr = createDeploymentRequestTemplateAzure - default: - return nil, fmt.Errorf("unsupported CSP [%s]", csp) + templateContext, err := createDeploymentTemplateContext(csp, req) + if err != nil { + return nil, fmt.Errorf("creating request template context: %w", err) } - tpl, err := template.New("create_deployment_request").Parse(tplStr) + tpl, err := template.New("create_deployment_request"). + Funcs(template.FuncMap{"json": jsonMarshal}). 
+ Parse(createDeploymentRequestTemplate) if err != nil { return nil, fmt.Errorf("unable to parse deployment creation template: %w", err) } - return tpl, nil + var bBuf bytes.Buffer + err = tpl.Execute(&bBuf, templateContext) + if err != nil { + return nil, fmt.Errorf("rendering create deployment request template with context %v : %w", templateContext, err) + } + return bBuf.Bytes(), nil } -const createDeploymentRequestTemplateGCP = ` -{ - "resources": { - "integrations_server": [ - { - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "region": "{{ .Region }}", - "plan": { - "cluster_topology": [ - { - "instance_configuration_id": "gcp.integrationsserver.n2.68x32x45.2", - "zone_count": 1, - "size": { - "resource": "memory", - "value": 1024 - } - } - ], - "integrations_server": { - "version": "{{ .Version }}" - } - }, - "ref_id": "main-integrations_server" - } - ], - "elasticsearch": [ - { - "region": "{{ .Region }}", - "settings": { - "dedicated_masters_threshold": 6 - }, - "plan": { - "cluster_topology": [ - { - "zone_count": 1, - "elasticsearch": { - "node_attributes": { - "data": "hot" - } - }, - "instance_configuration_id": "gcp.es.datahot.n2.68x10x45", - "node_roles": [ - "master", - "ingest", - "transform", - "data_hot", - "remote_cluster_client", - "data_content" - ], - "id": "hot_content", - "size": { - "resource": "memory", - "value": 8192 - } - } - ], - "elasticsearch": { - "version": "{{ .Version }}", - "enabled_built_in_plugins": [] - }, - "deployment_template": { - "id": "gcp-storage-optimized-v5" - } - }, - "ref_id": "main-elasticsearch" - } - ], - "enterprise_search": [], - "kibana": [ - { - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "region": "{{ .Region }}", - "plan": { - "cluster_topology": [ - { - "instance_configuration_id": "gcp.kibana.n2.68x32x45", - "zone_count": 1, - "size": { - "resource": "memory", - "value": 1024 - } - } - ], - "kibana": { - "version": "{{ .Version }}", - "user_settings_json": { - "xpack.fleet.enableExperimental": ["agentTamperProtectionEnabled"] - } - } - }, - "ref_id": "main-kibana" - } - ] - }, - "settings": { - "autoscaling_enabled": false - }, - "name": "{{ .Name }}", - "metadata": { - "system_owned": false - } -}` - -const createDeploymentRequestTemplateAzure = ` -{ - "resources": { - "integrations_server": [ - { - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "region": "{{ .Region }}", - "plan": { - "cluster_topology": [ - { - "instance_configuration_id": "azure.integrationsserver.fsv2.2", - "zone_count": 1, - "size": { - "resource": "memory", - "value": 1024 - } - } - ], - "integrations_server": { - "version": "{{ .Version }}" - } - }, - "ref_id": "main-integrations_server" - } - ], - "elasticsearch": [ - { - "region": "{{ .Region }}", - "settings": { - "dedicated_masters_threshold": 6 - }, - "plan": { - "cluster_topology": [ - { - "zone_count": 1, - "elasticsearch": { - "node_attributes": { - "data": "hot" - } - }, - "instance_configuration_id": "azure.es.datahot.edsv4", - "node_roles": [ - "master", - "ingest", - "transform", - "data_hot", - "remote_cluster_client", - "data_content" - ], - "id": "hot_content", - "size": { - "resource": "memory", - "value": 8192 - } - } - ], - "elasticsearch": { - "version": "{{ .Version }}", - "enabled_built_in_plugins": [] - }, - "deployment_template": { - "id": "azure-storage-optimized-v2" - } - }, - "ref_id": "main-elasticsearch" - } - ], - "enterprise_search": [], - "kibana": [ - { - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "region": "{{ .Region }}", - 
"plan": { - "cluster_topology": [ - { - "instance_configuration_id": "azure.kibana.fsv2", - "zone_count": 1, - "size": { - "resource": "memory", - "value": 1024 - } - } - ], - "kibana": { - "version": "{{ .Version }}", - "user_settings_json": { - "xpack.fleet.enableExperimental": ["agentTamperProtectionEnabled"] - } - } - }, - "ref_id": "main-kibana" - } - ] - }, - "settings": { - "autoscaling_enabled": false - }, - "name": "{{ .Name }}", - "metadata": { - "system_owned": false - } -}` +func jsonMarshal(in any) (string, error) { + jsonBytes, err := json.Marshal(in) + if err != nil { + return "", err + } + + return string(jsonBytes), nil +} + +func createDeploymentTemplateContext(csp string, req CreateDeploymentRequest) (map[string]any, error) { + cspSpecificContext, err := loadCspValues(csp) + if err != nil { + return nil, fmt.Errorf("loading csp-specific values for %q: %w", csp, err) + } + + cspSpecificContext["request"] = req + + return cspSpecificContext, nil +} + +func loadCspValues(csp string) (map[string]any, error) { + var cspValues map[string]map[string]any + + err := yaml.Unmarshal(cloudProviderSpecificValues, &cspValues) + if err != nil { + return nil, fmt.Errorf("unmarshalling error: %w", err) + } + values, supportedCSP := cspValues[csp] + if !supportedCSP { + return nil, fmt.Errorf("csp %s not supported", csp) + } + + return values, nil +} diff --git a/pkg/testing/ess/provisioner.go b/pkg/testing/ess/provisioner.go index 941cf5bcaf7..081b4100869 100644 --- a/pkg/testing/ess/provisioner.go +++ b/pkg/testing/ess/provisioner.go @@ -67,7 +67,14 @@ func (p *provisioner) Provision(ctx context.Context, requests []runner.StackRequ for _, r := range requests { // allow up to 2 minutes for each create request createCtx, createCancel := context.WithTimeout(ctx, 2*time.Minute) - resp, err := p.createDeployment(createCtx, r) + resp, err := p.createDeployment(createCtx, r, + map[string]string{ + "division": "engineering", + "org": "ingest", + "team": "elastic-agent", + "project": "elastic-agent", + "integration-tests": "true", + }) createCancel() if err != nil { return nil, err @@ -131,17 +138,30 @@ func (p *provisioner) Clean(ctx context.Context, stacks []runner.Stack) error { return nil } -func (p *provisioner) createDeployment(ctx context.Context, r runner.StackRequest) (*CreateDeploymentResponse, error) { +func (p *provisioner) createDeployment(ctx context.Context, r runner.StackRequest, tags map[string]string) (*CreateDeploymentResponse, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() p.logger.Logf("Creating stack %s (%s)", r.Version, r.ID) name := fmt.Sprintf("%s-%s", strings.Replace(p.cfg.Identifier, ".", "-", -1), r.ID) - resp, err := p.client.CreateDeployment(ctx, CreateDeploymentRequest{ + + // prepare tags + tagArray := make([]Tag, 0, len(tags)) + for k, v := range tags { + tagArray = append(tagArray, Tag{ + Key: k, + Value: v, + }) + } + + createDeploymentRequest := CreateDeploymentRequest{ Name: name, Region: p.cfg.Region, Version: r.Version, - }) + Tags: tagArray, + } + + resp, err := p.client.CreateDeployment(ctx, createDeploymentRequest) if err != nil { p.logger.Logf("Failed to create ESS cloud %s: %s", r.Version, err) return nil, fmt.Errorf("failed to create ESS cloud for version %s: %w", r.Version, err) From 05d4eaff31eeee8d92c3c6d0055d3c79112bfec9 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 16 Oct 2023 11:55:50 -0400 Subject: [PATCH 4/8] [Automation] Bump Golang version to 1.20.10 
(#3601) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update from dockerfiles Made with ❤️️ by updatecli * chore: Update version.asciidoc Made with ❤️️ by updatecli * chore: Update .golangci.yml Made with ❤️️ by updatecli * chore: Update .go-version Made with ❤️️ by updatecli * Add Go 1.20.10 changelog fragment. --------- Co-authored-by: apmmachine Co-authored-by: Craig MacKenzie --- .go-version | 2 +- .golangci.yml | 8 ++++---- Dockerfile | 2 +- Dockerfile.skaffold | 2 +- ...-1.20.9.yaml => 1697229987-Upgrade-to-Go-1.20.10.yaml} | 6 +++--- version/docs/version.asciidoc | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) rename changelog/fragments/{1697111928-upgrade-to-go-1.20.9.yaml => 1697229987-Upgrade-to-Go-1.20.10.yaml} (93%) diff --git a/.go-version b/.go-version index 20538953a5b..acdfc7930c8 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.20.9 +1.20.10 diff --git a/.golangci.yml b/.golangci.yml index 89203ee7a09..c72fb52ef8d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -116,7 +116,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.20.9" + go: "1.20.10" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -136,17 +136,17 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.20.9" + go: "1.20.10" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.20.9" + go: "1.20.10" checks: ["all"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.20.9" + go: "1.20.10" gosec: excludes: diff --git a/Dockerfile b/Dockerfile index 10882552a46..f3d3475041b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.20.9 +ARG GO_VERSION=1.20.10 FROM circleci/golang:${GO_VERSION} diff --git a/Dockerfile.skaffold b/Dockerfile.skaffold index 24a4df32626..4122e146bf6 100644 --- a/Dockerfile.skaffold +++ b/Dockerfile.skaffold @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.20.9 +ARG GO_VERSION=1.20.10 ARG crossbuild_image="docker.elastic.co/beats-dev/golang-crossbuild" ARG AGENT_VERSION=8.9.0-SNAPSHOT ARG AGENT_IMAGE="docker.elastic.co/beats/elastic-agent" diff --git a/changelog/fragments/1697111928-upgrade-to-go-1.20.9.yaml b/changelog/fragments/1697229987-Upgrade-to-Go-1.20.10.yaml similarity index 93% rename from changelog/fragments/1697111928-upgrade-to-go-1.20.9.yaml rename to changelog/fragments/1697229987-Upgrade-to-Go-1.20.10.yaml index 0e2bba7ec53..84556964dd9 100644 --- a/changelog/fragments/1697111928-upgrade-to-go-1.20.9.yaml +++ b/changelog/fragments/1697229987-Upgrade-to-Go-1.20.10.yaml @@ -11,7 +11,7 @@ kind: security # Change summary; a 80ish characters long description of the change. -summary: Upgrade to Go 1.20.9 +summary: Upgrade to Go 1.20.10. # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits. @@ -19,13 +19,13 @@ summary: Upgrade to Go 1.20.9 #description: # Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. -component: elastic-agent +component: "elastic-agent" # PR URL; optional; the PR number that added the changeset. # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. 
# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. -pr: https://github.com/elastic/elastic-agent/pull/3393 +pr: https://github.com/elastic/elastic-agent/pull/3601 # Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc index 153c58d4edf..87bce0ff1c3 100644 --- a/version/docs/version.asciidoc +++ b/version/docs/version.asciidoc @@ -3,7 +3,7 @@ // FIXME: once elastic.co docs have been switched over to use `main`, remove // the `doc-site-branch` line below as well as any references to it in the code. :doc-site-branch: master -:go-version: 1.20.9 +:go-version: 1.20.10 :release-state: unreleased :python: 3.7 :docker: 1.12 From 9adbac28c880cc73d98f57dc5c0369db31aed3cb Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Mon, 16 Oct 2023 13:45:32 -0400 Subject: [PATCH 5/8] Remove the agent version override in tests. (#3563) * Remove the agent version override in tests. We now have working 8.12.0 and 8.11.0 snapshots. * Add failing unit test for upgradeable minor during FF. * Return 8.11.0-SNAPSHOT as the previous minor. * Fix broken condition in upgrade rollback test. Use the agent core version in the condition in the policy because the variable evaluation strips the snapshot suffix. * Don't auto overwrite sourceURI if it isn't the default. --- .buildkite/scripts/steps/integration_tests.sh | 4 +- .../artifact/download/snapshot/downloader.go | 5 + .../download/snapshot/downloader_test.go | 27 ++++++ pkg/testing/tools/artifacts_api.go | 31 +++---- testing/integration/upgrade_rollback_test.go | 8 +- testing/upgradetest/versions.go | 27 +++++- testing/upgradetest/versions_test.go | 92 +++++++++++++++++++ 7 files changed, 170 insertions(+), 24 deletions(-) create mode 100644 internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader_test.go create mode 100644 testing/upgradetest/versions_test.go diff --git a/.buildkite/scripts/steps/integration_tests.sh b/.buildkite/scripts/steps/integration_tests.sh index 8b1dec76449..d4ebfc03a2e 100755 --- a/.buildkite/scripts/steps/integration_tests.sh +++ b/.buildkite/scripts/steps/integration_tests.sh @@ -5,8 +5,8 @@ source .buildkite/scripts/common.sh # Override the agent package version using a string with format .. # NOTE: use only after version bump when the new version is not yet available, for example: -# OVERRIDE_AGENT_PACKAGE_VERSION="8.10.3" -OVERRIDE_AGENT_PACKAGE_VERSION="8.10.2" +# OVERRIDE_AGENT_PACKAGE_VERSION="8.10.3" otherwise OVERRIDE_AGENT_PACKAGE_VERSION="". 
+OVERRIDE_AGENT_PACKAGE_VERSION="" if [[ -n "$OVERRIDE_AGENT_PACKAGE_VERSION" ]]; then OVERRIDE_TEST_AGENT_VERSION=${OVERRIDE_AGENT_PACKAGE_VERSION}"-SNAPSHOT" diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go index 0cad22dbe09..51b16ee4372 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go @@ -88,6 +88,11 @@ func snapshotConfig(config *artifact.Config, versionOverride *agtversion.ParsedS } func snapshotURI(versionOverride *agtversion.ParsedSemVer, config *artifact.Config) (string, error) { + // Respect a non-default source URI even if the version is a snapshot. + if config.SourceURI != artifact.DefaultSourceURI { + return config.SourceURI, nil + } + // snapshot downloader is used also by the 'localremote' impl in case of agent currently running off a snapshot build: // the 'localremote' downloader does not pass a specific version, implying that we should update to the latest snapshot // build of the same ..-SNAPSHOT version diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader_test.go new file mode 100644 index 00000000000..18ed58b0d65 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader_test.go @@ -0,0 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package snapshot + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/pkg/version" +) + +func TestNonDefaultSourceURI(t *testing.T) { + version, err := version.ParseVersion("8.12.0-SNAPSHOT") + require.NoError(t, err) + + config := artifact.Config{ + SourceURI: "localhost:1234", + } + sourceURI, err := snapshotURI(version, &config) + require.NoError(t, err) + require.Equal(t, config.SourceURI, sourceURI) + +} diff --git a/pkg/testing/tools/artifacts_api.go b/pkg/testing/tools/artifacts_api.go index 0e91d212af3..bbf79eab032 100644 --- a/pkg/testing/tools/artifacts_api.go +++ b/pkg/testing/tools/artifacts_api.go @@ -34,21 +34,20 @@ var ( ErrBadHTTPStatusCode = errors.New("bad http status code") ) +type Manifests struct { + LastUpdateTime string `json:"last-update-time"` + SecondsSinceLastUpdate int `json:"seconds-since-last-update"` +} + type VersionList struct { - Versions []string `json:"versions"` - Aliases []string `json:"aliases"` - Manifests struct { - LastUpdateTime string `json:"last-update-time"` - SecondsSinceLastUpdate int `json:"seconds-since-last-update"` - } `json:"manifests"` + Versions []string `json:"versions"` + Aliases []string `json:"aliases"` + Manifests Manifests `json:"manifests"` } type VersionBuilds struct { - Builds []string `json:"builds"` - Manifests struct { - LastUpdateTime string `json:"last-update-time"` - SecondsSinceLastUpdate int `json:"seconds-since-last-update"` - } `json:"manifests"` + Builds []string `json:"builds"` + Manifests Manifests `json:"manifests"` } type Package struct { @@ -99,18 +98,12 @@ type Build struct { type BuildDetails struct { Build Build - Manifests struct { - LastUpdateTime string `json:"last-update-time"` - SecondsSinceLastUpdate int `json:"seconds-since-last-update"` - } `json:"manifests"` + Manifests Manifests `json:"manifests"` } type SearchPackageResult struct { Packages map[string]Package `json:"packages"` - Manifests struct { - LastUpdateTime string `json:"last-update-time"` - SecondsSinceLastUpdate int `json:"seconds-since-last-update"` - } `json:"manifests"` + Manifests Manifests `json:"manifests"` } type httpDoer interface { diff --git a/testing/integration/upgrade_rollback_test.go b/testing/integration/upgrade_rollback_test.go index 920a399ffb2..3226ae92a51 100644 --- a/testing/integration/upgrade_rollback_test.go +++ b/testing/integration/upgrade_rollback_test.go @@ -22,6 +22,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/install" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/version" "github.com/elastic/elastic-agent/testing/upgradetest" ) @@ -56,6 +57,11 @@ func TestStandaloneUpgradeRollback(t *testing.T) { t.Logf("Testing Elastic Agent upgrade from %s to %s...", define.Version(), upgradeToVersion) + // We need to use the core version in the condition below because -SNAPSHOT is + // stripped from the ${agent.version.version} evaluation below. + parsedUpgradeToVersion, err := version.ParseVersion(upgradeToVersion) + require.NoError(t, err) + // Configure Agent with fast watcher configuration and also an invalid // input when the Agent version matches the upgraded Agent version. 
This way // the pre-upgrade version of the Agent runs healthy, but the post-upgrade @@ -71,7 +77,7 @@ inputs: - condition: '${agent.version.version} == "%s"' type: invalid id: invalid-input -`, upgradeToVersion) +`, parsedUpgradeToVersion.CoreVersion()) return startFixture.Configure(ctx, []byte(invalidInputPolicy)) } diff --git a/testing/upgradetest/versions.go b/testing/upgradetest/versions.go index a1f046bb6aa..ea77b6f0ba6 100644 --- a/testing/upgradetest/versions.go +++ b/testing/upgradetest/versions.go @@ -42,10 +42,16 @@ func GetUpgradableVersions(ctx context.Context, upgradeToVersion string, current return nil, errors.New("retrieved versions list from Artifact API is empty") } + return getUpgradableVersions(ctx, vList, upgradeToVersion, currentMajorVersions, previousMajorVersions) +} + +// Internal version of GetUpgradableVersions() with the artifacts API dependency removed for testing. +func getUpgradableVersions(ctx context.Context, vList *tools.VersionList, upgradeToVersion string, currentMajorVersions int, previousMajorVersions int) ([]*version.ParsedSemVer, error) { parsedUpgradeToVersion, err := version.ParseVersion(upgradeToVersion) if err != nil { return nil, fmt.Errorf("upgradeToVersion %q is not a valid version string: %w", upgradeToVersion, err) } + currentMajor := parsedUpgradeToVersion.Major() var currentMajorSelected, previousMajorSelected int @@ -66,6 +72,11 @@ func GetUpgradableVersions(ctx context.Context, upgradeToVersion string, current // we want to sort in descending orders, so we sort them sort.Sort(sort.Reverse(sortedParsedVersions)) + // If the only available build of the most recent version is a snapshot it is unreleased. + // This is always true on main and true until the first release of each minor version branch. + mostRecentVersion := sortedParsedVersions[0] + mostRecentIsUnreleased := mostRecentVersion.IsSnapshot() + var upgradableVersions []*version.ParsedSemVer for _, parsedVersion := range sortedParsedVersions { if currentMajorSelected == currentMajorVersions && previousMajorSelected == previousMajorVersions { @@ -78,9 +89,21 @@ func GetUpgradableVersions(ctx context.Context, upgradeToVersion string, current continue } + isPrevMinor := (parsedUpgradeToVersion.Major() == parsedVersion.Major()) && + (parsedUpgradeToVersion.Minor()-parsedVersion.Minor()) == 1 + if parsedVersion.IsSnapshot() { - // skip all snapshots - continue + // Allow returning the snapshot build of the previous minor if the current version is unreleased. + // In this situation the previous minor branch may also be unreleased immediately after feature freeze. + if !mostRecentIsUnreleased || !isPrevMinor { + continue + } + } else { + // Skip the non-snapshot build of the previous minor since it might only be available at + // staging.elastic.co which is not a default binary download location. + if mostRecentIsUnreleased && isPrevMinor { + continue + } } if parsedVersion.Major() == currentMajor && currentMajorSelected < currentMajorVersions { diff --git a/testing/upgradetest/versions_test.go b/testing/upgradetest/versions_test.go new file mode 100644 index 00000000000..14fe58128cc --- /dev/null +++ b/testing/upgradetest/versions_test.go @@ -0,0 +1,92 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package upgradetest + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/pkg/testing/tools" +) + +// Response from https://artifacts-api.elastic.co/v1/versions shortly after the 8.11 feature freeze. +var versionListAfter8_11FeatureFreeze = tools.VersionList{ + Versions: []string{ + "7.17.10", + "7.17.11", + "7.17.12", + "7.17.13", + "7.17.14-SNAPSHOT", + "7.17.14", + "8.7.1", + "8.8.0", + "8.8.1", + "8.8.2", + "8.9.0", + "8.9.1", + "8.9.2", + "8.10.0-SNAPSHOT", + "8.10.0", + "8.10.1-SNAPSHOT", + "8.10.1", + "8.10.2-SNAPSHOT", + "8.10.2", + "8.10.3-SNAPSHOT", + "8.10.3", + "8.11.0-SNAPSHOT", + "8.11.0", + "8.12.0-SNAPSHOT", + }, + Aliases: []string{ + "7.17-SNAPSHOT", + "7.17", + "8.7", + "8.8", + "8.9", + "8.10-SNAPSHOT", + "8.10", + "8.11-SNAPSHOT", + "8.11", + "8.12-SNAPSHOT", + }, + Manifests: tools.Manifests{ + LastUpdateTime: "Tue, 10 Oct 2023 19:20:17 UTC", + SecondsSinceLastUpdate: 278, + }, +} + +// Tests that GetUpgradableVersions behaves correctly during the feature freeze period +// where the both main and the previous minor release branch versions are unreleased. +// Regression test for the problem described in https://github.com/elastic/elastic-agent/pull/3563#issuecomment-1756007790. +func TestGetUpgradableVersionsAfterFeatureFreeze(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start from 8.12.0 assuming the 8.11.0 feature freeze has just happened. + // The 8.11.0 release is upgradable because the first 8.11.0 build candidate exists, + // but it is only available from staging.elastic.co which is not a binary download + // source that is supported by default. + currentVersion := "8.12.0" + + // Since the 8.11.0 BC at staging.elastic.co isn't available to the agent by default, + // getUpgradableVersions should return 8.11.0-SNAPSHOT as the previous minor so an + // upgrade can proceed. + expectedUpgradableVersions := []string{ + "8.11.0-SNAPSHOT", "8.10.3", "8.10.2", "7.17.14", "7.17.13", + } + + // Get several of the previous versions to ensure snapshot selection works correctly. + versions, err := getUpgradableVersions(ctx, &versionListAfter8_11FeatureFreeze, currentVersion, 3, 2) + require.NoError(t, err) + require.NotEmpty(t, versions) + + t.Logf("exp: %s", expectedUpgradableVersions) + t.Logf("act: %s", versions) + for i, exp := range expectedUpgradableVersions { + require.Equal(t, exp, versions[i].String()) + } +} From fa357a875cfe179f8e4846f70d8c8bececef85ee Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Mon, 16 Oct 2023 23:04:28 +0200 Subject: [PATCH 6/8] [Fix] Agent incapable of running on Azure Container Instances (#3576) What this change is introducing on top of bringing back work introduced in #3084 is change of ordrer for some operations. Changing owner of a file, discards capabilities set. This becomes a problem with heartbeat as it needs setuid and netraw capabilities to perform properly. So setting capabilities was moved after chown. 
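As an illustration of the ordering issue (not part of the diff below), the capability loss can be reproduced on any Linux host with the libcap tools; the commands assume root, and the binary path and owner used here are placeholders rather than the real image layout:

    # grant the capabilities heartbeat needs, then confirm they are set
    setcap cap_net_raw,cap_setuid+p ./heartbeat
    getcap ./heartbeat            # lists the granted capabilities
    # changing ownership clears the file's security.capability xattr
    chown agent:agent ./heartbeat
    getcap ./heartbeat            # prints nothing: the capabilities are gone

Hence the Dockerfile template now runs setcap only after the final chown.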
--- ...er-runs-on-Azure-Container-Instances-.yaml | 31 +++++++++++++++++++ .../docker/Dockerfile.elastic-agent.tmpl | 29 ++++++++--------- 2 files changed, 46 insertions(+), 14 deletions(-) create mode 100644 changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml diff --git a/changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml b/changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml new file mode 100644 index 00000000000..df24e655971 --- /dev/null +++ b/changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug + +# Change summary; a 80ish characters long description of the change. +summary: Elastic-Agent container runs on Azure Container Instances + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: elastic-agent + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 3576 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: 82 diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index da49b14092b..314aa20d150 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl @@ -9,7 +9,6 @@ FROM {{ .buildFrom }} AS home COPY beat {{ $beatHome }} RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \ - chown -R root:root {{ $beatHome }} && \ find {{ $beatHome }} -type d -exec chmod 0755 {} \; && \ find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \ find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \ @@ -127,25 +126,16 @@ COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses COPY --from=home /opt /opt {{- end }} - -RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ -{{- if .linux_capabilities }} -# Since the beat is stored at the other end of a symlink we must follow the symlink first -# For security reasons setcap does not support symlinks. 
This is smart in the general case -# but in our specific case since we're building a trusted image from trusted binaries this is -# fine. Thus, we use readlink to follow the link and setcap on the actual binary - readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} && \ -{{- end }} -true - {{- if eq .user "root" }} {{- if contains .image_name "-cloud" }} # Generate folder for a stub command that will be overwritten at runtime RUN mkdir /app {{- end }} {{- else }} -RUN groupadd --gid 1000 {{ .BeatName }} -RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} +RUN groupadd --gid 1000 {{ .BeatName }} && \ + useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} && \ + chown -R {{ .user }}:{{ .user }} {{ $beatHome }} && \ + true {{- if contains .image_name "-cloud" }} # Generate folder for a stub command that will be overwritten at runtime @@ -154,6 +144,17 @@ RUN chown {{ .user }} /app {{- end }} {{- end }} +# Keep this after any chown command, chown resets any applied capabilities +RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ +{{- if .linux_capabilities }} +# Since the beat is stored at the other end of a symlink we must follow the symlink first +# For security reasons setcap does not support symlinks. This is smart in the general case +# but in our specific case since we're building a trusted image from trusted binaries this is +# fine. Thus, we use readlink to follow the link and setcap on the actual binary + setcap {{ .linux_capabilities }} $(readlink -f {{ $beatBinary }}) && \ +{{- end }} +true + {{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} USER root ENV NODE_PATH={{ $beatHome }}/.node From f7e558f736d5c17b5488a66e9051df814b95c050 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Tue, 17 Oct 2023 12:24:29 +0200 Subject: [PATCH 7/8] install fails if enroll fails (#3554) * fix install/enroll cmd not failing when agent restart fails * surface errors that might occur during enroll * fail install command if agent cannot be restarted * do not print success message if there was an enroll error. 
Print an error message and the error instead * add logs to show the different enroll attempts * add more context t errors * refactor internal/pkg/agent/install/perms_unix.go and add more context to errors restore main version * ignore agent restart error on enroll tests as there is no agent to be restarted * daemonReloadWithBackoff does not retry on context deadline exceeded and context cancelled * fix typos --- ...-Surface-errors-during-Agent's-enroll.yaml | 32 +++++++++ dev-tools/mage/godaemon.go | 2 +- internal/pkg/agent/cmd/enroll.go | 2 +- internal/pkg/agent/cmd/enroll_cmd.go | 67 ++++++++++++----- internal/pkg/agent/cmd/enroll_cmd_test.go | 72 +++++++++++++------ internal/pkg/agent/cmd/install.go | 4 +- internal/pkg/agent/install/perms_unix.go | 32 +++++---- 7 files changed, 156 insertions(+), 55 deletions(-) create mode 100644 changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml diff --git a/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml b/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml new file mode 100644 index 00000000000..f8361f99433 --- /dev/null +++ b/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Surface errors during Agent's enroll process, failing if any happens. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: install/enroll + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/3207 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/dev-tools/mage/godaemon.go b/dev-tools/mage/godaemon.go index 90960bfe69f..40d5e94564b 100644 --- a/dev-tools/mage/godaemon.go +++ b/dev-tools/mage/godaemon.go @@ -21,7 +21,7 @@ var ( } ) -// BuildGoDaemon builds the go-deamon binary. +// BuildGoDaemon builds the go-daemon binary. 
func BuildGoDaemon() error { if GOOS != "linux" { return errors.New("go-daemon only builds for linux") diff --git a/internal/pkg/agent/cmd/enroll.go b/internal/pkg/agent/cmd/enroll.go index 1bce5f7e547..adaa278f32f 100644 --- a/internal/pkg/agent/cmd/enroll.go +++ b/internal/pkg/agent/cmd/enroll.go @@ -351,7 +351,7 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command) error { // Error: failed to fix permissions: chown /Library/Elastic/Agent/data/elastic-agent-c13f91/elastic-agent.app: operation not permitted // This is because we are fixing permissions twice, once during installation and again during the enrollment step. // When we are enrolling as part of installation on MacOS, skip the second attempt to fix permissions. - var fixPermissions bool = fromInstall + fixPermissions := fromInstall if runtime.GOOS == "darwin" { fixPermissions = false } diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index b5992f10188..d57c91d0da6 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -172,7 +172,7 @@ func newEnrollCmd( ) } -// newEnrollCmdWithStore creates an new enrollment and accept a custom store. +// newEnrollCmdWithStore creates a new enrollment and accept a custom store. func newEnrollCmdWithStore( log *logger.Logger, options *enrollCmdOption, @@ -187,10 +187,11 @@ func newEnrollCmdWithStore( }, nil } -// Execute tries to enroll the agent into Fleet. +// Execute enrolls the agent into Fleet. func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { var err error defer c.stopAgent() // ensure its stopped no matter what + span, ctx := apm.StartSpan(ctx, "enroll", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() @@ -235,7 +236,7 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { // Ensure that the agent does not use a proxy configuration // when connecting to the local fleet server. 
// Note that when running fleet-server the enroll request will be sent to :8220, - // however when the agent is running afterwards requests will be sent to :8221 + // however when the agent is running afterward requests will be sent to :8221 c.remoteConfig.Transport.Proxy.Disable = true } @@ -256,7 +257,7 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { err = c.enrollWithBackoff(ctx, persistentConfig) if err != nil { - return errors.New(err, "fail to enroll") + return fmt.Errorf("fail to enroll: %w", err) } if c.options.FixPermissions { @@ -267,17 +268,23 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { } defer func() { - fmt.Fprintln(streams.Out, "Successfully enrolled the Elastic Agent.") + if err != nil { + fmt.Fprintf(streams.Err, "Something went wrong while enrolling the Elastic Agent: %v\n", err) + } else { + fmt.Fprintln(streams.Out, "Successfully enrolled the Elastic Agent.") + } }() if c.agentProc == nil { - if err := c.daemonReload(ctx); err != nil { - c.log.Infow("Elastic Agent might not be running; unable to trigger restart", "error", err) - } else { - c.log.Info("Successfully triggered restart on running Elastic Agent.") + if err = c.daemonReloadWithBackoff(ctx); err != nil { + c.log.Errorf("Elastic Agent might not be running; unable to trigger restart: %v", err) + return fmt.Errorf("could not reload agent daemon, unable to trigger restart: %w", err) } + + c.log.Info("Successfully triggered restart on running Elastic Agent.") return nil } + c.log.Info("Elastic Agent has been enrolled; start Elastic Agent") return nil } @@ -443,24 +450,35 @@ func (c *enrollCmd) prepareFleetTLS() error { func (c *enrollCmd) daemonReloadWithBackoff(ctx context.Context) error { err := c.daemonReload(ctx) + if err != nil && + (errors.Is(err, context.DeadlineExceeded) || + errors.Is(err, context.Canceled)) { + return fmt.Errorf("could not reload daemon: %w", err) + } if err == nil { return nil } signal := make(chan struct{}) + defer close(signal) backExp := backoff.NewExpBackoff(signal, 10*time.Second, 1*time.Minute) - for i := 5; i >= 0; i-- { + for i := 0; i < 5; i++ { backExp.Wait() c.log.Info("Retrying to restart...") err = c.daemonReload(ctx) + if err != nil && + (errors.Is(err, context.DeadlineExceeded) || + errors.Is(err, context.Canceled)) { + return fmt.Errorf("could not reload daemon after %d retries: %w", + i+1, err) + } if err == nil { - break + return nil } } - close(signal) - return err + return fmt.Errorf("could not reload agent's daemon, all retries failed. 
Last error: %w", err) } func (c *enrollCmd) daemonReload(ctx context.Context) error { @@ -478,8 +496,20 @@ func (c *enrollCmd) enrollWithBackoff(ctx context.Context, persistentConfig map[ c.log.Infof("Starting enrollment to URL: %s", c.client.URI()) err := c.enroll(ctx, persistentConfig) + if err == nil { + return nil + } + + const deadline = 10 * time.Minute + const frequency = 60 * time.Second + + c.log.Infof("1st enrollment attempt failed, retrying for %s, every %s enrolling to URL: %s", + deadline, + frequency, + c.client.URI()) signal := make(chan struct{}) - backExp := backoff.NewExpBackoff(signal, 60*time.Second, 10*time.Minute) + defer close(signal) + backExp := backoff.NewExpBackoff(signal, frequency, deadline) for { retry := false @@ -498,7 +528,6 @@ func (c *enrollCmd) enrollWithBackoff(ctx context.Context, persistentConfig map[ err = c.enroll(ctx, persistentConfig) } - close(signal) return err } @@ -547,8 +576,10 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte c.options.FleetServer.ElasticsearchInsecure, ) if err != nil { - return err + return fmt.Errorf( + "failed creating fleet-server bootstrap config: %w", err) } + // no longer need bootstrap at this point serverConfig.Server.Bootstrap = false fleetConfig.Server = serverConfig.Server @@ -568,11 +599,11 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte reader, err := yamlToReader(configToStore) if err != nil { - return err + return fmt.Errorf("yamlToReader failed: %w", err) } if err := safelyStoreAgentInfo(c.configStore, reader); err != nil { - return err + return fmt.Errorf("failed to store agent config: %w", err) } // clear action store diff --git a/internal/pkg/agent/cmd/enroll_cmd_test.go b/internal/pkg/agent/cmd/enroll_cmd_test.go index 189ad7b6563..b38d89f9cf2 100644 --- a/internal/pkg/agent/cmd/enroll_cmd_test.go +++ b/internal/pkg/agent/cmd/enroll_cmd_test.go @@ -16,8 +16,11 @@ import ( "os" "runtime" "strconv" + "strings" "testing" + "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" @@ -159,14 +162,23 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) streams, _, _, _ := cli.NewTestingIOStreams() - err = cmd.Execute(context.Background(), streams) - require.NoError(t, err) - + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + err = cmd.Execute(ctx, streams) + + // There is no agent running, therefore nothing to be restarted. + // However, this will cause the Enroll command to return an error + // which we'll ignore here. 
+ require.ErrorContainsf(t, err,
+ "could not reload agent daemon, unable to trigger restart",
+ "enroll command returned an unexpected error")
+ require.ErrorContainsf(t, err, context.DeadlineExceeded.Error(),
+ "it should fail only due to %q", context.DeadlineExceeded)
config, err := readConfig(store.Content)
- require.NoError(t, err)
- require.Equal(t, "my-access-api-key", config.AccessAPIKey)
- require.Equal(t, host, config.Client.Host)
+
+ assert.Equal(t, "my-access-api-key", config.AccessAPIKey)
+ assert.Equal(t, host, config.Client.Host)
},
))
@@ -216,16 +228,24 @@ func TestEnroll(t *testing.T) {
require.NoError(t, err)
streams, _, _, _ := cli.NewTestingIOStreams()
- err = cmd.Execute(context.Background(), streams)
- require.NoError(t, err)
-
- require.True(t, store.Called)
-
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+ defer cancel()
+ err = cmd.Execute(ctx, streams)
+ if err != nil &&
+ // There is no agent running, therefore nothing to be restarted.
+ // However, this will cause the Enroll command to return an error
+ // which we'll ignore here.
+ !strings.Contains(err.Error(),
+ "could not reload agent daemon, unable to trigger restart") {
+ t.Fatalf("enroll command returned an unexpected error: %v", err)
+ }
+
+ assert.True(t, store.Called)
config, err := readConfig(store.Content)
- require.NoError(t, err)
- require.Equal(t, "my-access-api-key", config.AccessAPIKey)
- require.Equal(t, host, config.Client.Host)
+ assert.NoError(t, err)
+ assert.Equal(t, "my-access-api-key", config.AccessAPIKey)
+ assert.Equal(t, host, config.Client.Host)
},
))
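The test changes above lean on a testify convention: require.* aborts the test on the first failed check, which suits the one error that must match, while assert.* records failures and keeps going so every mismatch is reported in a single run. A small self-contained test in the same spirit, assuming only testify and with hypothetical names:

package sketch

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestExpectedRestartError tolerates the "no daemon to restart" failure the
// enroll tests now expect, then lets the remaining checks run with assert.
func TestExpectedRestartError(t *testing.T) {
	err := fmt.Errorf("could not reload agent daemon, unable to trigger restart: %w",
		errors.New("no running daemon"))

	// Abort immediately if this is some other, unexpected error.
	require.ErrorContains(t, err, "could not reload agent daemon, unable to trigger restart")

	// Non-fatal checks: all of them run and report together.
	assert.ErrorContains(t, err, "no running daemon")
	assert.NotContains(t, err.Error(), "permission denied")
}

Switching the follow-up checks from require to assert is what lets the config assertions in the patch still run and report even when an earlier expectation fails.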
@@ -275,16 +295,24 @@ func TestEnroll(t *testing.T) {
require.NoError(t, err)
streams, _, _, _ := cli.NewTestingIOStreams()
- err = cmd.Execute(context.Background(), streams)
- require.NoError(t, err)
-
- require.True(t, store.Called)
-
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+ defer cancel()
+ err = cmd.Execute(ctx, streams)
+
+ if err != nil &&
+ // There is no agent running, therefore nothing to be restarted.
+ // However, this will cause the Enroll command to return an error
+ // which we'll ignore here.
+ !strings.Contains(err.Error(),
+ "could not reload agent daemon, unable to trigger restart") {
+ t.Fatalf("enroll command returned an unexpected error: %v", err)
+ }
+
+ assert.True(t, store.Called)
config, err := readConfig(store.Content)
- require.NoError(t, err)
- require.Equal(t, "my-access-api-key", config.AccessAPIKey)
- require.Equal(t, host, config.Client.Host)
+ assert.Equal(t, "my-access-api-key", config.AccessAPIKey)
+ assert.Equal(t, host, config.Client.Host)
},
))
diff --git a/internal/pkg/agent/cmd/install.go b/internal/pkg/agent/cmd/install.go
index 4fbd37f40da..2cb46bd599d 100644
--- a/internal/pkg/agent/cmd/install.go
+++ b/internal/pkg/agent/cmd/install.go
@@ -154,7 +154,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command) error {
return fmt.Errorf("problem reading prompt response")
}
if url == "" {
- fmt.Fprintf(streams.Out, "Enrollment cancelled because no URL was provided.\n")
+ fmt.Fprintln(streams.Out, "Enrollment cancelled because no URL was provided.")
return nil
}
}
@@ -224,6 +224,8 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command) error {
}
}()
}
+
+ fmt.Fprintln(streams.Out, "Elastic Agent successfully installed, starting enrollment.")
}
if enroll {
diff --git a/internal/pkg/agent/install/perms_unix.go b/internal/pkg/agent/install/perms_unix.go
index e84dcd5039c..fc357fd4fde 100644
--- a/internal/pkg/agent/install/perms_unix.go
+++ b/internal/pkg/agent/install/perms_unix.go
@@ -8,6 +8,7 @@ package install
import (
"errors"
+ "fmt"
"io/fs"
"os"
"path/filepath"
@@ -18,19 +19,26 @@ func fixPermissions(topPath string) error {
return recursiveRootPermissions(topPath)
}
-func recursiveRootPermissions(path string) error {
- return filepath.Walk(path, func(name string, info fs.FileInfo, err error) error {
- if err == nil {
- // all files should be owned by root:root
- err = os.Chown(name, 0, 0)
- if err != nil {
- return err
- }
- // remove any world permissions from the file
- err = os.Chmod(name, info.Mode().Perm()&0770)
- } else if errors.Is(err, fs.ErrNotExist) {
+func recursiveRootPermissions(root string) error {
+ return filepath.Walk(root, func(path string, info fs.FileInfo, err error) error {
+ if errors.Is(err, fs.ErrNotExist) {
return nil
}
- return err
+ if err != nil {
+ return fmt.Errorf("walk on %q failed: %w", path, err)
+ }
+
+ // all files should be owned by root:root
+ err = os.Chown(path, 0, 0)
+ if err != nil {
+ return fmt.Errorf("could not fix ownership of %q: %w", path, err)
+ }
+ // remove any world permissions from the file
+ err = os.Chmod(path, info.Mode().Perm()&0770)
+ if err != nil {
+ return fmt.Errorf("could not fix permissions of %q: %w", path, err)
+ }
+
+ return nil
})
}
From defde80db7ce43db2e319fd2cfb458f16101a69f Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Tue, 17 Oct 2023 14:57:15 +0200
Subject: [PATCH 8/8] Changelog for 8.10.4 (#3606) (#3619)

* Changelog for 8.10.4

* Update changelog/8.10.4.asciidoc

Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com>

---------

Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com>
(cherry picked from commit 22938b10f3d98f97eefdbe2d5ca11e6cfb960d8d)

# Conflicts:
# changelog/fragments/1693427183-install-progress.yaml

Co-authored-by: Pierre HILBERT
---
 changelog/8.10.4.asciidoc | 55 +++++++++++++++++++
 changelog/8.10.4.yaml | 25 +++++++++
 ...ck-for-package-signature-verification.yaml | 31 -----------
 3 files changed, 80 insertions(+), 31 deletions(-)
 create mode 100644 
changelog/8.10.4.asciidoc create mode 100644 changelog/8.10.4.yaml delete mode 100644 changelog/fragments/1695289867-Secondary-fallback-for-package-signature-verification.yaml diff --git a/changelog/8.10.4.asciidoc b/changelog/8.10.4.asciidoc new file mode 100644 index 00000000000..c3dbf7c03ab --- /dev/null +++ b/changelog/8.10.4.asciidoc @@ -0,0 +1,55 @@ +// begin 8.10.4 relnotes + +[[release-notes-8.10.4]] +== 8.10.4 + +Review important information about the 8.10.4 release. + + + +[discrete] +[[breaking-changes-8.10.4]] +=== Breaking changes + +Breaking changes can prevent your application from optimal operation and +performance. Before you upgrade, review the breaking changes, then mitigate the +impact to your application. + +elastic-agent:: + +[discrete] +[[breaking-3591]] +.`elastic-agent-autodiscover` library has been updated to version 0.6.4, disabling metadata For `kubernetes.deployment` and `kubernetes.cronjob` fields. +[%collapsible] +==== +*Details* + +The `elastic-agent-autodiscover` Kubernetes library by default comes with `add_resource_metadata.deployment=false` and `add_resource_metadata.cronjob=false`. +*Impact* + +Pods that will be created from deployments or cronjobs will not have the extra metadata field for `kubernetes.deployment` or `kubernetes.cronjob`, respectively. This change was made to avoid the memory impact of keeping the feature enabled in big Kubernetes clusters. +For more information, refer to {agent-pull}3591[#3591]. +==== + + + + + +[discrete] +[[new-features-8.10.4]] +=== New features + +The 8.10.4 release adds the following new and notable features. + + +elastic-agent:: + +* Secondary Fallback For Package Signature Verification. {elastic-agent-pull}https://github.com/elastic/elastic-agent/pull/3453[#https://github.com/elastic/elastic-agent/pull/3453] {elastic-agent-issue}https://github.com/elastic/elastic-agent/issues/3264[#https://github.com/elastic/elastic-agent/issues/3264] ++ +Ability to upgrade securely in air-gapped environment where fleet server is the only reachable URI. + + + + + + + +// end 8.10.4 relnotes diff --git a/changelog/8.10.4.yaml b/changelog/8.10.4.yaml new file mode 100644 index 00000000000..a397d32526b --- /dev/null +++ b/changelog/8.10.4.yaml @@ -0,0 +1,25 @@ +version: 8.10.4 +entries: + - kind: feature + summary: Secondary fallback for package signature verification + description: Ability to upgrade securely in air-gapped environment where fleet server is the only reachable URI. + component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3453 + issue: + - https://github.com/elastic/elastic-agent/issues/3264 + timestamp: 1695289867 + file: + name: 1695289867-Secondary-fallback-for-package-signature-verification.yaml + checksum: 8f8c39d9eef2f5b6922353bcab9c4ee1b74b1378 + - kind: breaking-change + summary: Elastic-agent-autodiscover to v0.6.4. Disables metadata for deployment and cronjob + description: Elastic-agent-autodiscover library by default comes with add_resource_metadata.deployment=false and add_resource_metadata.cronjob=false. Pods that will be created from deployments or cronjobs will not have the extra metadata field for kubernetes.deployment or kubernetes.cronjob respectively. 
+ component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3591 + issue: [] + timestamp: 1697102363 + file: + name: 1697102363-updating_agentautodiscovery_810.yaml + checksum: fe9015185dc4d3fe85f9c2ebf9f47e64e26fc67d diff --git a/changelog/fragments/1695289867-Secondary-fallback-for-package-signature-verification.yaml b/changelog/fragments/1695289867-Secondary-fallback-for-package-signature-verification.yaml deleted file mode 100644 index 07c8c4e5cf8..00000000000 --- a/changelog/fragments/1695289867-Secondary-fallback-for-package-signature-verification.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Kind can be one of: -# - breaking-change: a change to previously-documented behavior -# - deprecation: functionality that is being removed in a later release -# - bug-fix: fixes a problem in a previous version -# - enhancement: extends functionality but does not break or fix existing behavior -# - feature: new functionality -# - known-issue: problems that we are aware of in a given version -# - security: impacts on the security of a product or a user’s deployment. -# - upgrade: important information for someone upgrading from a prior version -# - other: does not fit into any of the other categories -kind: feature - -# Change summary; a 80ish characters long description of the change. -summary: Secondary fallback for package signature verification - -# Long description; in case the summary is not enough to describe the change -# this field accommodate a description without length limits. -description: Ability to upgrade securely in Air gapped environment where fleet server is the only reachable URI. - -# Affected component; a word indicating the component this changeset affects. -component: elastic-agent - -# PR number; optional; the PR number that added the changeset. -# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. -# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. -# Please provide it if you are adding a fragment for a different PR. -pr: https://github.com/elastic/elastic-agent/pull/3453 - -# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). -# If not present is automatically filled by the tooling with the issue linked to the PR number. -issue: https://github.com/elastic/elastic-agent/issues/3264
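Looking back at the perms_unix.go change earlier in this series: the mask info.Mode().Perm()&0770 keeps the owner and group permission bits and clears the bits granted to everyone else. A tiny standalone example of the mask's effect (file modes chosen purely for illustration):

package main

import (
	"fmt"
	"io/fs"
)

func main() {
	// perms_unix.go applies mode & 0770 after chown root:root, which strips
	// the permission bits granted to "other" users while keeping owner/group.
	for _, m := range []fs.FileMode{0o755, 0o644, 0o777} {
		fmt.Printf("%#o -> %#o\n", m.Perm(), m.Perm()&0o770)
	}
	// Output:
	// 0755 -> 0750
	// 0644 -> 0640
	// 0777 -> 0770
}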