From fa357a875cfe179f8e4846f70d8c8bececef85ee Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Mon, 16 Oct 2023 23:04:28 +0200 Subject: [PATCH 01/15] [Fix] Agent incapable of running on Azure Container Instances (#3576) What this change is introducing on top of bringing back work introduced in #3084 is change of ordrer for some operations. Changing owner of a file, discards capabilities set. This becomes a problem with heartbeat as it needs setuid and netraw capabilities to perform properly. So setting capabilities was moved after chown. --- ...er-runs-on-Azure-Container-Instances-.yaml | 31 +++++++++++++++++++ .../docker/Dockerfile.elastic-agent.tmpl | 29 ++++++++--------- 2 files changed, 46 insertions(+), 14 deletions(-) create mode 100644 changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml diff --git a/changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml b/changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml new file mode 100644 index 00000000000..df24e655971 --- /dev/null +++ b/changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug + +# Change summary; a 80ish characters long description of the change. +summary: Elastic-Agent container runs on Azure Container Instances + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: elastic-agent + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 3576 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: 82 diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index da49b14092b..314aa20d150 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl @@ -9,7 +9,6 @@ FROM {{ .buildFrom }} AS home COPY beat {{ $beatHome }} RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \ - chown -R root:root {{ $beatHome }} && \ find {{ $beatHome }} -type d -exec chmod 0755 {} \; && \ find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \ find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \ @@ -127,25 +126,16 @@ COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses COPY --from=home /opt /opt {{- end }} - -RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ -{{- if .linux_capabilities }} -# Since the beat is stored at the other end of a symlink we must follow the symlink first -# For security reasons setcap does not support symlinks. This is smart in the general case -# but in our specific case since we're building a trusted image from trusted binaries this is -# fine. Thus, we use readlink to follow the link and setcap on the actual binary - readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} && \ -{{- end }} -true - {{- if eq .user "root" }} {{- if contains .image_name "-cloud" }} # Generate folder for a stub command that will be overwritten at runtime RUN mkdir /app {{- end }} {{- else }} -RUN groupadd --gid 1000 {{ .BeatName }} -RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} +RUN groupadd --gid 1000 {{ .BeatName }} && \ + useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} && \ + chown -R {{ .user }}:{{ .user }} {{ $beatHome }} && \ + true {{- if contains .image_name "-cloud" }} # Generate folder for a stub command that will be overwritten at runtime @@ -154,6 +144,17 @@ RUN chown {{ .user }} /app {{- end }} {{- end }} +# Keep this after any chown command, chown resets any applied capabilities +RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ +{{- if .linux_capabilities }} +# Since the beat is stored at the other end of a symlink we must follow the symlink first +# For security reasons setcap does not support symlinks. This is smart in the general case +# but in our specific case since we're building a trusted image from trusted binaries this is +# fine. Thus, we use readlink to follow the link and setcap on the actual binary + setcap {{ .linux_capabilities }} $(readlink -f {{ $beatBinary }}) && \ +{{- end }} +true + {{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} USER root ENV NODE_PATH={{ $beatHome }}/.node From f7e558f736d5c17b5488a66e9051df814b95c050 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Tue, 17 Oct 2023 12:24:29 +0200 Subject: [PATCH 02/15] install fails if enroll fails (#3554) * fix install/enroll cmd not failing when agent restart fails * surface errors that might occur during enroll * fail install command if agent cannot be restarted * do not print success message if there was an enroll error. 
Print an error message and the error instead * add logs to show the different enroll attempts * add more context t errors * refactor internal/pkg/agent/install/perms_unix.go and add more context to errors restore main version * ignore agent restart error on enroll tests as there is no agent to be restarted * daemonReloadWithBackoff does not retry on context deadline exceeded and context cancelled * fix typos --- ...-Surface-errors-during-Agent's-enroll.yaml | 32 +++++++++ dev-tools/mage/godaemon.go | 2 +- internal/pkg/agent/cmd/enroll.go | 2 +- internal/pkg/agent/cmd/enroll_cmd.go | 67 ++++++++++++----- internal/pkg/agent/cmd/enroll_cmd_test.go | 72 +++++++++++++------ internal/pkg/agent/cmd/install.go | 4 +- internal/pkg/agent/install/perms_unix.go | 32 +++++---- 7 files changed, 156 insertions(+), 55 deletions(-) create mode 100644 changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml diff --git a/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml b/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml new file mode 100644 index 00000000000..f8361f99433 --- /dev/null +++ b/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Surface errors during Agent's enroll process, failing if any happens. + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: install/enroll + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/3207 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/dev-tools/mage/godaemon.go b/dev-tools/mage/godaemon.go index 90960bfe69f..40d5e94564b 100644 --- a/dev-tools/mage/godaemon.go +++ b/dev-tools/mage/godaemon.go @@ -21,7 +21,7 @@ var ( } ) -// BuildGoDaemon builds the go-deamon binary. +// BuildGoDaemon builds the go-daemon binary. 
func BuildGoDaemon() error { if GOOS != "linux" { return errors.New("go-daemon only builds for linux") diff --git a/internal/pkg/agent/cmd/enroll.go b/internal/pkg/agent/cmd/enroll.go index 1bce5f7e547..adaa278f32f 100644 --- a/internal/pkg/agent/cmd/enroll.go +++ b/internal/pkg/agent/cmd/enroll.go @@ -351,7 +351,7 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command) error { // Error: failed to fix permissions: chown /Library/Elastic/Agent/data/elastic-agent-c13f91/elastic-agent.app: operation not permitted // This is because we are fixing permissions twice, once during installation and again during the enrollment step. // When we are enrolling as part of installation on MacOS, skip the second attempt to fix permissions. - var fixPermissions bool = fromInstall + fixPermissions := fromInstall if runtime.GOOS == "darwin" { fixPermissions = false } diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index b5992f10188..d57c91d0da6 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -172,7 +172,7 @@ func newEnrollCmd( ) } -// newEnrollCmdWithStore creates an new enrollment and accept a custom store. +// newEnrollCmdWithStore creates a new enrollment and accept a custom store. func newEnrollCmdWithStore( log *logger.Logger, options *enrollCmdOption, @@ -187,10 +187,11 @@ func newEnrollCmdWithStore( }, nil } -// Execute tries to enroll the agent into Fleet. +// Execute enrolls the agent into Fleet. func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { var err error defer c.stopAgent() // ensure its stopped no matter what + span, ctx := apm.StartSpan(ctx, "enroll", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() @@ -235,7 +236,7 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { // Ensure that the agent does not use a proxy configuration // when connecting to the local fleet server. 
// Note that when running fleet-server the enroll request will be sent to :8220, - // however when the agent is running afterwards requests will be sent to :8221 + // however when the agent is running afterward requests will be sent to :8221 c.remoteConfig.Transport.Proxy.Disable = true } @@ -256,7 +257,7 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { err = c.enrollWithBackoff(ctx, persistentConfig) if err != nil { - return errors.New(err, "fail to enroll") + return fmt.Errorf("fail to enroll: %w", err) } if c.options.FixPermissions { @@ -267,17 +268,23 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { } defer func() { - fmt.Fprintln(streams.Out, "Successfully enrolled the Elastic Agent.") + if err != nil { + fmt.Fprintf(streams.Err, "Something went wrong while enrolling the Elastic Agent: %v\n", err) + } else { + fmt.Fprintln(streams.Out, "Successfully enrolled the Elastic Agent.") + } }() if c.agentProc == nil { - if err := c.daemonReload(ctx); err != nil { - c.log.Infow("Elastic Agent might not be running; unable to trigger restart", "error", err) - } else { - c.log.Info("Successfully triggered restart on running Elastic Agent.") + if err = c.daemonReloadWithBackoff(ctx); err != nil { + c.log.Errorf("Elastic Agent might not be running; unable to trigger restart: %v", err) + return fmt.Errorf("could not reload agent daemon, unable to trigger restart: %w", err) } + + c.log.Info("Successfully triggered restart on running Elastic Agent.") return nil } + c.log.Info("Elastic Agent has been enrolled; start Elastic Agent") return nil } @@ -443,24 +450,35 @@ func (c *enrollCmd) prepareFleetTLS() error { func (c *enrollCmd) daemonReloadWithBackoff(ctx context.Context) error { err := c.daemonReload(ctx) + if err != nil && + (errors.Is(err, context.DeadlineExceeded) || + errors.Is(err, context.Canceled)) { + return fmt.Errorf("could not reload daemon: %w", err) + } if err == nil { return nil } signal := make(chan struct{}) + defer close(signal) backExp := backoff.NewExpBackoff(signal, 10*time.Second, 1*time.Minute) - for i := 5; i >= 0; i-- { + for i := 0; i < 5; i++ { backExp.Wait() c.log.Info("Retrying to restart...") err = c.daemonReload(ctx) + if err != nil && + (errors.Is(err, context.DeadlineExceeded) || + errors.Is(err, context.Canceled)) { + return fmt.Errorf("could not reload daemon after %d retries: %w", + i+1, err) + } if err == nil { - break + return nil } } - close(signal) - return err + return fmt.Errorf("could not reload agent's daemon, all retries failed. 
Last error: %w", err) } func (c *enrollCmd) daemonReload(ctx context.Context) error { @@ -478,8 +496,20 @@ func (c *enrollCmd) enrollWithBackoff(ctx context.Context, persistentConfig map[ c.log.Infof("Starting enrollment to URL: %s", c.client.URI()) err := c.enroll(ctx, persistentConfig) + if err == nil { + return nil + } + + const deadline = 10 * time.Minute + const frequency = 60 * time.Second + + c.log.Infof("1st enrollment attempt failed, retrying for %s, every %s enrolling to URL: %s", + deadline, + frequency, + c.client.URI()) signal := make(chan struct{}) - backExp := backoff.NewExpBackoff(signal, 60*time.Second, 10*time.Minute) + defer close(signal) + backExp := backoff.NewExpBackoff(signal, frequency, deadline) for { retry := false @@ -498,7 +528,6 @@ func (c *enrollCmd) enrollWithBackoff(ctx context.Context, persistentConfig map[ err = c.enroll(ctx, persistentConfig) } - close(signal) return err } @@ -547,8 +576,10 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte c.options.FleetServer.ElasticsearchInsecure, ) if err != nil { - return err + return fmt.Errorf( + "failed creating fleet-server bootstrap config: %w", err) } + // no longer need bootstrap at this point serverConfig.Server.Bootstrap = false fleetConfig.Server = serverConfig.Server @@ -568,11 +599,11 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte reader, err := yamlToReader(configToStore) if err != nil { - return err + return fmt.Errorf("yamlToReader failed: %w", err) } if err := safelyStoreAgentInfo(c.configStore, reader); err != nil { - return err + return fmt.Errorf("failed to store agent config: %w", err) } // clear action store diff --git a/internal/pkg/agent/cmd/enroll_cmd_test.go b/internal/pkg/agent/cmd/enroll_cmd_test.go index 189ad7b6563..b38d89f9cf2 100644 --- a/internal/pkg/agent/cmd/enroll_cmd_test.go +++ b/internal/pkg/agent/cmd/enroll_cmd_test.go @@ -16,8 +16,11 @@ import ( "os" "runtime" "strconv" + "strings" "testing" + "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" @@ -159,14 +162,23 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) streams, _, _, _ := cli.NewTestingIOStreams() - err = cmd.Execute(context.Background(), streams) - require.NoError(t, err) - + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + err = cmd.Execute(ctx, streams) + + // There is no agent running, therefore nothing to be restarted. + // However, this will cause the Enroll command to return an error + // which we'll ignore here. 
+ require.ErrorContainsf(t, err, + "could not reload agent daemon, unable to trigger restart", + "enroll command returned an unexpected error") + require.ErrorContainsf(t, err, context.DeadlineExceeded.Error(), + "it should fail only due to %q", context.DeadlineExceeded) config, err := readConfig(store.Content) - require.NoError(t, err) - require.Equal(t, "my-access-api-key", config.AccessAPIKey) - require.Equal(t, host, config.Client.Host) + + assert.Equal(t, "my-access-api-key", config.AccessAPIKey) + assert.Equal(t, host, config.Client.Host) }, )) @@ -216,16 +228,24 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) streams, _, _, _ := cli.NewTestingIOStreams() - err = cmd.Execute(context.Background(), streams) - require.NoError(t, err) - - require.True(t, store.Called) - + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + err = cmd.Execute(ctx, streams) + if err != nil && + // There is no agent running, therefore nothing to be restarted. + // However, this will cause the Enroll command to return an error + // which we'll ignore here. + !strings.Contains(err.Error(), + "could not reload agent daemon, unable to trigger restart") { + t.Fatalf("enrrol coms returned and unexpected error: %v", err) + } + + assert.True(t, store.Called) config, err := readConfig(store.Content) - require.NoError(t, err) - require.Equal(t, "my-access-api-key", config.AccessAPIKey) - require.Equal(t, host, config.Client.Host) + assert.NoError(t, err) + assert.Equal(t, "my-access-api-key", config.AccessAPIKey) + assert.Equal(t, host, config.Client.Host) }, )) @@ -275,16 +295,24 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) streams, _, _, _ := cli.NewTestingIOStreams() - err = cmd.Execute(context.Background(), streams) - require.NoError(t, err) - - require.True(t, store.Called) - + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + err = cmd.Execute(ctx, streams) + + if err != nil && + // There is no agent running, therefore nothing to be restarted. + // However, this will cause the Enroll command to return an error + // which we'll ignore here. 
+ !strings.Contains(err.Error(), + "could not reload agent daemon, unable to trigger restart") { + t.Fatalf("enrrol coms returned and unexpected error: %v", err) + } + + assert.True(t, store.Called) config, err := readConfig(store.Content) - require.NoError(t, err) - require.Equal(t, "my-access-api-key", config.AccessAPIKey) - require.Equal(t, host, config.Client.Host) + assert.Equal(t, "my-access-api-key", config.AccessAPIKey) + assert.Equal(t, host, config.Client.Host) }, )) diff --git a/internal/pkg/agent/cmd/install.go b/internal/pkg/agent/cmd/install.go index 4fbd37f40da..2cb46bd599d 100644 --- a/internal/pkg/agent/cmd/install.go +++ b/internal/pkg/agent/cmd/install.go @@ -154,7 +154,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command) error { return fmt.Errorf("problem reading prompt response") } if url == "" { - fmt.Fprintf(streams.Out, "Enrollment cancelled because no URL was provided.\n") + fmt.Fprintln(streams.Out, "Enrollment cancelled because no URL was provided.") return nil } } @@ -224,6 +224,8 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command) error { } }() } + + fmt.Fprintln(streams.Out, "Elastic Agent successfully installed, starting enrollment.") } if enroll { diff --git a/internal/pkg/agent/install/perms_unix.go b/internal/pkg/agent/install/perms_unix.go index e84dcd5039c..fc357fd4fde 100644 --- a/internal/pkg/agent/install/perms_unix.go +++ b/internal/pkg/agent/install/perms_unix.go @@ -8,6 +8,7 @@ package install import ( "errors" + "fmt" "io/fs" "os" "path/filepath" @@ -18,19 +19,26 @@ func fixPermissions(topPath string) error { return recursiveRootPermissions(topPath) } -func recursiveRootPermissions(path string) error { - return filepath.Walk(path, func(name string, info fs.FileInfo, err error) error { - if err == nil { - // all files should be owned by root:root - err = os.Chown(name, 0, 0) - if err != nil { - return err - } - // remove any world permissions from the file - err = os.Chmod(name, info.Mode().Perm()&0770) - } else if errors.Is(err, fs.ErrNotExist) { +func recursiveRootPermissions(root string) error { + return filepath.Walk(root, func(path string, info fs.FileInfo, err error) error { + if errors.Is(err, fs.ErrNotExist) { return nil } - return err + if err != nil { + return fmt.Errorf("walk on %q failed: %w", path, err) + } + + // all files should be owned by root:root + err = os.Chown(path, 0, 0) + if err != nil { + return fmt.Errorf("could not fix ownership of %q: %w", path, err) + } + // remove any world permissions from the file + err = os.Chmod(path, info.Mode().Perm()&0770) + if err != nil { + return fmt.Errorf("could not fix permissions of %q: %w", path, err) + } + + return nil }) } From defde80db7ce43db2e319fd2cfb458f16101a69f Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:57:15 +0200 Subject: [PATCH 03/15] Changelog for 8.10.4 (#3606) (#3619) * Changelog for 8.10.4 * Update changelog/8.10.4.asciidoc Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> --------- Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> (cherry picked from commit 22938b10f3d98f97eefdbe2d5ca11e6cfb960d8d) # Conflicts: # changelog/fragments/1693427183-install-progress.yaml Co-authored-by: Pierre HILBERT --- changelog/8.10.4.asciidoc | 55 +++++++++++++++++++ changelog/8.10.4.yaml | 25 +++++++++ ...ck-for-package-signature-verification.yaml | 31 ----------- 3 files changed, 80 insertions(+), 31 deletions(-) create mode 100644 
changelog/8.10.4.asciidoc create mode 100644 changelog/8.10.4.yaml delete mode 100644 changelog/fragments/1695289867-Secondary-fallback-for-package-signature-verification.yaml diff --git a/changelog/8.10.4.asciidoc b/changelog/8.10.4.asciidoc new file mode 100644 index 00000000000..c3dbf7c03ab --- /dev/null +++ b/changelog/8.10.4.asciidoc @@ -0,0 +1,55 @@ +// begin 8.10.4 relnotes + +[[release-notes-8.10.4]] +== 8.10.4 + +Review important information about the 8.10.4 release. + + + +[discrete] +[[breaking-changes-8.10.4]] +=== Breaking changes + +Breaking changes can prevent your application from optimal operation and +performance. Before you upgrade, review the breaking changes, then mitigate the +impact to your application. + +elastic-agent:: + +[discrete] +[[breaking-3591]] +.`elastic-agent-autodiscover` library has been updated to version 0.6.4, disabling metadata For `kubernetes.deployment` and `kubernetes.cronjob` fields. +[%collapsible] +==== +*Details* + +The `elastic-agent-autodiscover` Kubernetes library by default comes with `add_resource_metadata.deployment=false` and `add_resource_metadata.cronjob=false`. +*Impact* + +Pods that will be created from deployments or cronjobs will not have the extra metadata field for `kubernetes.deployment` or `kubernetes.cronjob`, respectively. This change was made to avoid the memory impact of keeping the feature enabled in big Kubernetes clusters. +For more information, refer to {agent-pull}3591[#3591]. +==== + + + + + +[discrete] +[[new-features-8.10.4]] +=== New features + +The 8.10.4 release adds the following new and notable features. + + +elastic-agent:: + +* Secondary Fallback For Package Signature Verification. {elastic-agent-pull}https://github.com/elastic/elastic-agent/pull/3453[#https://github.com/elastic/elastic-agent/pull/3453] {elastic-agent-issue}https://github.com/elastic/elastic-agent/issues/3264[#https://github.com/elastic/elastic-agent/issues/3264] ++ +Ability to upgrade securely in air-gapped environment where fleet server is the only reachable URI. + + + + + + + +// end 8.10.4 relnotes diff --git a/changelog/8.10.4.yaml b/changelog/8.10.4.yaml new file mode 100644 index 00000000000..a397d32526b --- /dev/null +++ b/changelog/8.10.4.yaml @@ -0,0 +1,25 @@ +version: 8.10.4 +entries: + - kind: feature + summary: Secondary fallback for package signature verification + description: Ability to upgrade securely in air-gapped environment where fleet server is the only reachable URI. + component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3453 + issue: + - https://github.com/elastic/elastic-agent/issues/3264 + timestamp: 1695289867 + file: + name: 1695289867-Secondary-fallback-for-package-signature-verification.yaml + checksum: 8f8c39d9eef2f5b6922353bcab9c4ee1b74b1378 + - kind: breaking-change + summary: Elastic-agent-autodiscover to v0.6.4. Disables metadata for deployment and cronjob + description: Elastic-agent-autodiscover library by default comes with add_resource_metadata.deployment=false and add_resource_metadata.cronjob=false. Pods that will be created from deployments or cronjobs will not have the extra metadata field for kubernetes.deployment or kubernetes.cronjob respectively. 
+ component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3591 + issue: [] + timestamp: 1697102363 + file: + name: 1697102363-updating_agentautodiscovery_810.yaml + checksum: fe9015185dc4d3fe85f9c2ebf9f47e64e26fc67d diff --git a/changelog/fragments/1695289867-Secondary-fallback-for-package-signature-verification.yaml b/changelog/fragments/1695289867-Secondary-fallback-for-package-signature-verification.yaml deleted file mode 100644 index 07c8c4e5cf8..00000000000 --- a/changelog/fragments/1695289867-Secondary-fallback-for-package-signature-verification.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Kind can be one of: -# - breaking-change: a change to previously-documented behavior -# - deprecation: functionality that is being removed in a later release -# - bug-fix: fixes a problem in a previous version -# - enhancement: extends functionality but does not break or fix existing behavior -# - feature: new functionality -# - known-issue: problems that we are aware of in a given version -# - security: impacts on the security of a product or a user’s deployment. -# - upgrade: important information for someone upgrading from a prior version -# - other: does not fit into any of the other categories -kind: feature - -# Change summary; a 80ish characters long description of the change. -summary: Secondary fallback for package signature verification - -# Long description; in case the summary is not enough to describe the change -# this field accommodate a description without length limits. -description: Ability to upgrade securely in Air gapped environment where fleet server is the only reachable URI. - -# Affected component; a word indicating the component this changeset affects. -component: elastic-agent - -# PR number; optional; the PR number that added the changeset. -# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. -# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. -# Please provide it if you are adding a fragment for a different PR. -pr: https://github.com/elastic/elastic-agent/pull/3453 - -# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). -# If not present is automatically filled by the tooling with the issue linked to the PR number. -issue: https://github.com/elastic/elastic-agent/issues/3264 From 1fd44fe22fead5ee5904ec48da38270b67a0e624 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Wed, 18 Oct 2023 21:21:14 +0200 Subject: [PATCH 04/15] Revert "install fails if enroll fails (#3554)" (#3629) This reverts commit f7e558f736d5c17b5488a66e9051df814b95c050. 
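
The change being reverted (PATCH 02 above) retries the daemon reload with exponential backoff and gives up early when the context is cancelled or its deadline passes. A minimal, standalone sketch of that pattern, using only the Go standard library rather than the agent's internal backoff package (the attempt count, delays, and function names here are illustrative, not the agent's code), looks like this:

// Minimal sketch of the retry-with-backoff pattern used by
// daemonReloadWithBackoff in PATCH 02 above. The agent uses its internal
// backoff package; this standalone version uses only the standard library,
// and the attempt count and delays are illustrative.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// reloadWithBackoff retries reload up to maxAttempts times, doubling the wait
// between attempts, and gives up immediately once the context is cancelled or
// its deadline is exceeded (mirroring the checks in the reverted change).
func reloadWithBackoff(ctx context.Context, reload func(context.Context) error) error {
	const maxAttempts = 5
	wait := 10 * time.Second

	err := reload(ctx)
	for attempt := 1; err != nil && attempt <= maxAttempts; attempt++ {
		if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
			return fmt.Errorf("could not reload daemon: %w", err)
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("could not reload daemon: %w", ctx.Err())
		case <-time.After(wait):
		}
		if wait < time.Minute {
			wait *= 2
		}
		err = reload(ctx)
	}
	if err != nil {
		return fmt.Errorf("could not reload daemon after %d retries: %w", maxAttempts, err)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	err := reloadWithBackoff(ctx, func(context.Context) error {
		return errors.New("daemon not running") // stand-in for the real reload RPC
	})
	fmt.Println(err)
}

Aborting on context.Canceled and context.DeadlineExceeded is what keeps the enroll path from spinning through retries after the caller has already given up, which is the behaviour the reverted change introduced and a later patch reinstates.
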
--- ...-Surface-errors-during-Agent's-enroll.yaml | 32 --------- dev-tools/mage/godaemon.go | 2 +- internal/pkg/agent/cmd/enroll.go | 2 +- internal/pkg/agent/cmd/enroll_cmd.go | 67 +++++------------ internal/pkg/agent/cmd/enroll_cmd_test.go | 72 ++++++------------- internal/pkg/agent/cmd/install.go | 4 +- internal/pkg/agent/install/perms_unix.go | 32 ++++----- 7 files changed, 55 insertions(+), 156 deletions(-) delete mode 100644 changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml diff --git a/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml b/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml deleted file mode 100644 index f8361f99433..00000000000 --- a/changelog/fragments/1693403216-Surface-errors-during-Agent's-enroll.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Kind can be one of: -# - breaking-change: a change to previously-documented behavior -# - deprecation: functionality that is being removed in a later release -# - bug-fix: fixes a problem in a previous version -# - enhancement: extends functionality but does not break or fix existing behavior -# - feature: new functionality -# - known-issue: problems that we are aware of in a given version -# - security: impacts on the security of a product or a user’s deployment. -# - upgrade: important information for someone upgrading from a prior version -# - other: does not fit into any of the other categories -kind: bug-fix - -# Change summary; a 80ish characters long description of the change. -summary: Surface errors during Agent's enroll process, failing if any happens. - -# Long description; in case the summary is not enough to describe the change -# this field accommodate a description without length limits. -# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. -#description: - -# Affected component; a word indicating the component this changeset affects. -component: install/enroll - -# PR URL; optional; the PR number that added the changeset. -# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. -# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. -# Please provide it if you are adding a fragment for a different PR. -pr: https://github.com/elastic/elastic-agent/pull/3207 - -# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). -# If not present is automatically filled by the tooling with the issue linked to the PR number. -#issue: https://github.com/owner/repo/1234 diff --git a/dev-tools/mage/godaemon.go b/dev-tools/mage/godaemon.go index 40d5e94564b..90960bfe69f 100644 --- a/dev-tools/mage/godaemon.go +++ b/dev-tools/mage/godaemon.go @@ -21,7 +21,7 @@ var ( } ) -// BuildGoDaemon builds the go-daemon binary. +// BuildGoDaemon builds the go-deamon binary. 
func BuildGoDaemon() error { if GOOS != "linux" { return errors.New("go-daemon only builds for linux") diff --git a/internal/pkg/agent/cmd/enroll.go b/internal/pkg/agent/cmd/enroll.go index adaa278f32f..1bce5f7e547 100644 --- a/internal/pkg/agent/cmd/enroll.go +++ b/internal/pkg/agent/cmd/enroll.go @@ -351,7 +351,7 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command) error { // Error: failed to fix permissions: chown /Library/Elastic/Agent/data/elastic-agent-c13f91/elastic-agent.app: operation not permitted // This is because we are fixing permissions twice, once during installation and again during the enrollment step. // When we are enrolling as part of installation on MacOS, skip the second attempt to fix permissions. - fixPermissions := fromInstall + var fixPermissions bool = fromInstall if runtime.GOOS == "darwin" { fixPermissions = false } diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index d57c91d0da6..b5992f10188 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -172,7 +172,7 @@ func newEnrollCmd( ) } -// newEnrollCmdWithStore creates a new enrollment and accept a custom store. +// newEnrollCmdWithStore creates an new enrollment and accept a custom store. func newEnrollCmdWithStore( log *logger.Logger, options *enrollCmdOption, @@ -187,11 +187,10 @@ func newEnrollCmdWithStore( }, nil } -// Execute enrolls the agent into Fleet. +// Execute tries to enroll the agent into Fleet. func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { var err error defer c.stopAgent() // ensure its stopped no matter what - span, ctx := apm.StartSpan(ctx, "enroll", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() @@ -236,7 +235,7 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { // Ensure that the agent does not use a proxy configuration // when connecting to the local fleet server. 
// Note that when running fleet-server the enroll request will be sent to :8220, - // however when the agent is running afterward requests will be sent to :8221 + // however when the agent is running afterwards requests will be sent to :8221 c.remoteConfig.Transport.Proxy.Disable = true } @@ -257,7 +256,7 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { err = c.enrollWithBackoff(ctx, persistentConfig) if err != nil { - return fmt.Errorf("fail to enroll: %w", err) + return errors.New(err, "fail to enroll") } if c.options.FixPermissions { @@ -268,23 +267,17 @@ func (c *enrollCmd) Execute(ctx context.Context, streams *cli.IOStreams) error { } defer func() { - if err != nil { - fmt.Fprintf(streams.Err, "Something went wrong while enrolling the Elastic Agent: %v\n", err) - } else { - fmt.Fprintln(streams.Out, "Successfully enrolled the Elastic Agent.") - } + fmt.Fprintln(streams.Out, "Successfully enrolled the Elastic Agent.") }() if c.agentProc == nil { - if err = c.daemonReloadWithBackoff(ctx); err != nil { - c.log.Errorf("Elastic Agent might not be running; unable to trigger restart: %v", err) - return fmt.Errorf("could not reload agent daemon, unable to trigger restart: %w", err) + if err := c.daemonReload(ctx); err != nil { + c.log.Infow("Elastic Agent might not be running; unable to trigger restart", "error", err) + } else { + c.log.Info("Successfully triggered restart on running Elastic Agent.") } - - c.log.Info("Successfully triggered restart on running Elastic Agent.") return nil } - c.log.Info("Elastic Agent has been enrolled; start Elastic Agent") return nil } @@ -450,35 +443,24 @@ func (c *enrollCmd) prepareFleetTLS() error { func (c *enrollCmd) daemonReloadWithBackoff(ctx context.Context) error { err := c.daemonReload(ctx) - if err != nil && - (errors.Is(err, context.DeadlineExceeded) || - errors.Is(err, context.Canceled)) { - return fmt.Errorf("could not reload daemon: %w", err) - } if err == nil { return nil } signal := make(chan struct{}) - defer close(signal) backExp := backoff.NewExpBackoff(signal, 10*time.Second, 1*time.Minute) - for i := 0; i < 5; i++ { + for i := 5; i >= 0; i-- { backExp.Wait() c.log.Info("Retrying to restart...") err = c.daemonReload(ctx) - if err != nil && - (errors.Is(err, context.DeadlineExceeded) || - errors.Is(err, context.Canceled)) { - return fmt.Errorf("could not reload daemon after %d retries: %w", - i+1, err) - } if err == nil { - return nil + break } } - return fmt.Errorf("could not reload agent's daemon, all retries failed. 
Last error: %w", err) + close(signal) + return err } func (c *enrollCmd) daemonReload(ctx context.Context) error { @@ -496,20 +478,8 @@ func (c *enrollCmd) enrollWithBackoff(ctx context.Context, persistentConfig map[ c.log.Infof("Starting enrollment to URL: %s", c.client.URI()) err := c.enroll(ctx, persistentConfig) - if err == nil { - return nil - } - - const deadline = 10 * time.Minute - const frequency = 60 * time.Second - - c.log.Infof("1st enrollment attempt failed, retrying for %s, every %s enrolling to URL: %s", - deadline, - frequency, - c.client.URI()) signal := make(chan struct{}) - defer close(signal) - backExp := backoff.NewExpBackoff(signal, frequency, deadline) + backExp := backoff.NewExpBackoff(signal, 60*time.Second, 10*time.Minute) for { retry := false @@ -528,6 +498,7 @@ func (c *enrollCmd) enrollWithBackoff(ctx context.Context, persistentConfig map[ err = c.enroll(ctx, persistentConfig) } + close(signal) return err } @@ -576,10 +547,8 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte c.options.FleetServer.ElasticsearchInsecure, ) if err != nil { - return fmt.Errorf( - "failed creating fleet-server bootstrap config: %w", err) + return err } - // no longer need bootstrap at this point serverConfig.Server.Bootstrap = false fleetConfig.Server = serverConfig.Server @@ -599,11 +568,11 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte reader, err := yamlToReader(configToStore) if err != nil { - return fmt.Errorf("yamlToReader failed: %w", err) + return err } if err := safelyStoreAgentInfo(c.configStore, reader); err != nil { - return fmt.Errorf("failed to store agent config: %w", err) + return err } // clear action store diff --git a/internal/pkg/agent/cmd/enroll_cmd_test.go b/internal/pkg/agent/cmd/enroll_cmd_test.go index b38d89f9cf2..189ad7b6563 100644 --- a/internal/pkg/agent/cmd/enroll_cmd_test.go +++ b/internal/pkg/agent/cmd/enroll_cmd_test.go @@ -16,11 +16,8 @@ import ( "os" "runtime" "strconv" - "strings" "testing" - "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" @@ -162,23 +159,14 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) streams, _, _, _ := cli.NewTestingIOStreams() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - err = cmd.Execute(ctx, streams) - - // There is no agent running, therefore nothing to be restarted. - // However, this will cause the Enroll command to return an error - // which we'll ignore here. 
- require.ErrorContainsf(t, err, - "could not reload agent daemon, unable to trigger restart", - "enroll command returned an unexpected error") - require.ErrorContainsf(t, err, context.DeadlineExceeded.Error(), - "it should fail only due to %q", context.DeadlineExceeded) - config, err := readConfig(store.Content) + err = cmd.Execute(context.Background(), streams) require.NoError(t, err) - assert.Equal(t, "my-access-api-key", config.AccessAPIKey) - assert.Equal(t, host, config.Client.Host) + config, err := readConfig(store.Content) + + require.NoError(t, err) + require.Equal(t, "my-access-api-key", config.AccessAPIKey) + require.Equal(t, host, config.Client.Host) }, )) @@ -228,24 +216,16 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) streams, _, _, _ := cli.NewTestingIOStreams() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - err = cmd.Execute(ctx, streams) - if err != nil && - // There is no agent running, therefore nothing to be restarted. - // However, this will cause the Enroll command to return an error - // which we'll ignore here. - !strings.Contains(err.Error(), - "could not reload agent daemon, unable to trigger restart") { - t.Fatalf("enrrol coms returned and unexpected error: %v", err) - } - - assert.True(t, store.Called) + err = cmd.Execute(context.Background(), streams) + require.NoError(t, err) + + require.True(t, store.Called) + config, err := readConfig(store.Content) - assert.NoError(t, err) - assert.Equal(t, "my-access-api-key", config.AccessAPIKey) - assert.Equal(t, host, config.Client.Host) + require.NoError(t, err) + require.Equal(t, "my-access-api-key", config.AccessAPIKey) + require.Equal(t, host, config.Client.Host) }, )) @@ -295,24 +275,16 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) streams, _, _, _ := cli.NewTestingIOStreams() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - err = cmd.Execute(ctx, streams) - - if err != nil && - // There is no agent running, therefore nothing to be restarted. - // However, this will cause the Enroll command to return an error - // which we'll ignore here. 
- !strings.Contains(err.Error(), - "could not reload agent daemon, unable to trigger restart") { - t.Fatalf("enrrol coms returned and unexpected error: %v", err) - } - - assert.True(t, store.Called) + err = cmd.Execute(context.Background(), streams) + require.NoError(t, err) + + require.True(t, store.Called) + config, err := readConfig(store.Content) + require.NoError(t, err) - assert.Equal(t, "my-access-api-key", config.AccessAPIKey) - assert.Equal(t, host, config.Client.Host) + require.Equal(t, "my-access-api-key", config.AccessAPIKey) + require.Equal(t, host, config.Client.Host) }, )) diff --git a/internal/pkg/agent/cmd/install.go b/internal/pkg/agent/cmd/install.go index 2cb46bd599d..4fbd37f40da 100644 --- a/internal/pkg/agent/cmd/install.go +++ b/internal/pkg/agent/cmd/install.go @@ -154,7 +154,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command) error { return fmt.Errorf("problem reading prompt response") } if url == "" { - fmt.Fprintln(streams.Out, "Enrollment cancelled because no URL was provided.") + fmt.Fprintf(streams.Out, "Enrollment cancelled because no URL was provided.\n") return nil } } @@ -224,8 +224,6 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command) error { } }() } - - fmt.Fprintln(streams.Out, "Elastic Agent successfully installed, starting enrollment.") } if enroll { diff --git a/internal/pkg/agent/install/perms_unix.go b/internal/pkg/agent/install/perms_unix.go index fc357fd4fde..e84dcd5039c 100644 --- a/internal/pkg/agent/install/perms_unix.go +++ b/internal/pkg/agent/install/perms_unix.go @@ -8,7 +8,6 @@ package install import ( "errors" - "fmt" "io/fs" "os" "path/filepath" @@ -19,26 +18,19 @@ func fixPermissions(topPath string) error { return recursiveRootPermissions(topPath) } -func recursiveRootPermissions(root string) error { - return filepath.Walk(root, func(path string, info fs.FileInfo, err error) error { - if errors.Is(err, fs.ErrNotExist) { +func recursiveRootPermissions(path string) error { + return filepath.Walk(path, func(name string, info fs.FileInfo, err error) error { + if err == nil { + // all files should be owned by root:root + err = os.Chown(name, 0, 0) + if err != nil { + return err + } + // remove any world permissions from the file + err = os.Chmod(name, info.Mode().Perm()&0770) + } else if errors.Is(err, fs.ErrNotExist) { return nil } - if err != nil { - return fmt.Errorf("walk on %q failed: %w", path, err) - } - - // all files should be owned by root:root - err = os.Chown(path, 0, 0) - if err != nil { - return fmt.Errorf("could not fix ownership of %q: %w", path, err) - } - // remove any world permissions from the file - err = os.Chmod(path, info.Mode().Perm()&0770) - if err != nil { - return fmt.Errorf("could not fix permissions of %q: %w", path, err) - } - - return nil + return err }) } From 3c2f61a24230105b6f7bb48a821aff4fc2c0b68e Mon Sep 17 00:00:00 2001 From: Lee E Hinman <57081003+leehinman@users.noreply.github.com> Date: Wed, 18 Oct 2023 19:46:32 -0500 Subject: [PATCH 05/15] upgrade elastic-agent-libs to v0.6.0 (#3632) * upgrade elastic-agent-libs to v0.6.0 allows elastic-agent running as a windows service to receive more than one change request. 
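
The library bump matters because the agent's Windows service handler previously stopped consuming service change requests after the first one. As a rough illustration of the pattern involved (this is not the elastic-agent-libs implementation; the service name and logging below are placeholders), a golang.org/x/sys/windows/svc handler keeps draining the change-request channel for the lifetime of the service:

//go:build windows

// Illustrative only: a generic golang.org/x/sys/windows/svc handler whose
// Execute loop keeps reading change requests (Interrogate, Stop, Shutdown, ...)
// until it is asked to stop, rather than returning after the first request.
// This is not the elastic-agent-libs code; names and behaviour are a sketch.
package main

import (
	"log"

	"golang.org/x/sys/windows/svc"
)

type handler struct{}

func (h *handler) Execute(args []string, requests <-chan svc.ChangeRequest, status chan<- svc.Status) (bool, uint32) {
	const accepted = svc.AcceptStop | svc.AcceptShutdown
	status <- svc.Status{State: svc.StartPending}
	status <- svc.Status{State: svc.Running, Accepts: accepted}

	// Keep servicing change requests for the lifetime of the service.
	for req := range requests {
		switch req.Cmd {
		case svc.Interrogate:
			status <- req.CurrentStatus
		case svc.Stop, svc.Shutdown:
			status <- svc.Status{State: svc.StopPending}
			return false, 0
		default:
			log.Printf("unexpected service control request: %v", req.Cmd)
		}
	}
	return false, 0
}

func main() {
	// Service name is a placeholder; run with `sc start <name>` once installed.
	if err := svc.Run("example-service", &handler{}); err != nil {
		log.Fatalf("service failed: %v", err)
	}
}

A handler that returns after handling a single request leaves the service manager unable to deliver later Stop or Shutdown commands, which matches the symptom the changelog fragment below describes.
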
--- NOTICE.txt | 4 +-- ...-upgrade-elastic-agent-libs-to-v0.6.0.yaml | 32 +++++++++++++++++++ go.mod | 2 +- go.sum | 4 +-- 4 files changed, 37 insertions(+), 5 deletions(-) create mode 100644 changelog/fragments/1697662109-upgrade-elastic-agent-libs-to-v0.6.0.yaml diff --git a/NOTICE.txt b/NOTICE.txt index 38ce2d28f66..9bc5150e46a 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1367,11 +1367,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.5.0 +Version: v0.6.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.5.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.6.0/LICENSE: Apache License Version 2.0, January 2004 diff --git a/changelog/fragments/1697662109-upgrade-elastic-agent-libs-to-v0.6.0.yaml b/changelog/fragments/1697662109-upgrade-elastic-agent-libs-to-v0.6.0.yaml new file mode 100644 index 00000000000..192434f3b5c --- /dev/null +++ b/changelog/fragments/1697662109-upgrade-elastic-agent-libs-to-v0.6.0.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: upgrade elastic-agent-libs to v0.6.0 + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +description: 'allows elastic-agent running as a windows service to receive more than one change request.' + +# Affected component; a word indicating the component this changeset affects. +component: agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: https://github.com/owner/repo/1234 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+#issue: https://github.com/owner/repo/1234 diff --git a/go.mod b/go.mod index f4d4d171fdc..322a8ef9f3a 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/elastic/e2e-testing v1.1.0 github.com/elastic/elastic-agent-autodiscover v0.6.4 github.com/elastic/elastic-agent-client/v7 v7.4.0 - github.com/elastic/elastic-agent-libs v0.5.0 + github.com/elastic/elastic-agent-libs v0.6.0 github.com/elastic/elastic-agent-system-metrics v0.7.0 github.com/elastic/elastic-transport-go/v8 v8.3.0 github.com/elastic/go-elasticsearch/v8 v8.10.0 diff --git a/go.sum b/go.sum index f27fce36f03..87f5be315fa 100644 --- a/go.sum +++ b/go.sum @@ -782,8 +782,8 @@ github.com/elastic/elastic-agent-autodiscover v0.6.4 h1:K+xC7OGgcy4fLXVuGgOGLs+e github.com/elastic/elastic-agent-autodiscover v0.6.4/go.mod h1:5+7NIBAILc0GkgxYW3ckXncu5wRZfltZhTY4aZAYP4M= github.com/elastic/elastic-agent-client/v7 v7.4.0 h1:h75oTkkvIjgiKVm61NpvTZP4cy6QbQ3zrIpXKGigyjo= github.com/elastic/elastic-agent-client/v7 v7.4.0/go.mod h1:9/amG2K2y2oqx39zURcc+hnqcX+nyJ1cZrLgzsgo5c0= -github.com/elastic/elastic-agent-libs v0.5.0 h1:8LbxSuMiGy8xhHX5NrE/dmTLsLMEuA+2AODUsiBfEcE= -github.com/elastic/elastic-agent-libs v0.5.0/go.mod h1:mpSfrigixx8x+uMxWKl4LtdlrKIhZbA4yT2eIeIazUQ= +github.com/elastic/elastic-agent-libs v0.6.0 h1:HnL/OpAzIHlK8y1J69XQuAx4tlCzd6e2kldMHvXARvY= +github.com/elastic/elastic-agent-libs v0.6.0/go.mod h1:K6U+n84siZ66ZyG36h1/x+fw1oIZbFXEypAC6KSiFOg= github.com/elastic/elastic-agent-system-metrics v0.7.0 h1:qDLY30UDforSd/TfHfqUDiiHSL6Nu6qLXHsKSxz4OuQ= github.com/elastic/elastic-agent-system-metrics v0.7.0/go.mod h1:9C1UEfj0P687HAzZepHszN6zXA+2tN2Lx3Osvq1zby8= github.com/elastic/elastic-integration-corpus-generator-tool v0.5.0/go.mod h1:uf9N86y+UACGybdEhZLpwZ93XHWVhsYZAA4c2T2v6YM= From d64d704812a177d50984501bde0ab1e5e6c5fcf1 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 19 Oct 2023 03:29:36 -0400 Subject: [PATCH 06/15] Change upgrade watcher to use StateWatch (#3622) * Add PID to status output. * Fix watcher interval. * Refactor watcher to use StateWatch removing the need for PID watching completely. * Add changelog. * Fix lint. * Run mage check. * Fix TestStandaloneUpgradeRollbackOnRestarts to use the build watcher. * Make lint happy. * Annoying lint. * Make notice. * Fix windows. * Fix unit test connection on Windows. * Adjust grpc client. * Add back PID tracking. * More watcher fixes. 
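
Conceptually, the refactor above has the upgrade watcher subscribe to a stream of agent state updates instead of polling a PID and error status, and it decides on rollback from the reported state. The sketch below only shows the shape of such a loop; the StateWatcher interface, State values, grace period, and fake client are hypothetical stand-ins, not the agent's control-protocol API:

// Hypothetical sketch of a state-stream based health watch. The StateWatcher
// interface and State type stand in for the real control-protocol client and
// are NOT the elastic-agent API; only the overall loop shape is illustrated.
package main

import (
	"context"
	"fmt"
	"time"
)

type State int

const (
	StateHealthy State = iota
	StateDegraded
	StateFailed
)

// StateWatcher is a placeholder for a client that streams agent state updates.
type StateWatcher interface {
	// Recv blocks until the next state update or an error (e.g. lost connection).
	Recv(ctx context.Context) (State, error)
}

// watch returns nil if the agent avoids the failed state for gracePeriod, and
// an error if it reports failure or the stream breaks, signalling that the
// caller should consider rolling back the upgrade.
func watch(ctx context.Context, w StateWatcher, gracePeriod time.Duration) error {
	deadline := time.Now().Add(gracePeriod)
	for time.Now().Before(deadline) {
		state, err := w.Recv(ctx)
		if err != nil {
			return fmt.Errorf("lost connection to the agent: %w", err)
		}
		if state == StateFailed {
			return fmt.Errorf("agent reported a failed state")
		}
	}
	return nil
}

// fakeWatcher replays a fixed sequence of states, then keeps reporting healthy.
type fakeWatcher struct{ states []State }

func (f *fakeWatcher) Recv(ctx context.Context) (State, error) {
	if len(f.states) == 0 {
		time.Sleep(100 * time.Millisecond)
		return StateHealthy, nil
	}
	s := f.states[0]
	f.states = f.states[1:]
	return s, nil
}

func main() {
	err := watch(context.Background(),
		&fakeWatcher{states: []State{StateHealthy, StateDegraded, StateFailed}},
		2*time.Second)
	fmt.Println(err) // prints: agent reported a failed state
}

Driving the decision from reported state rather than from process IDs is what removes the need for root access: the watcher no longer has to inspect another user's processes, it only has to talk to the control socket.
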
--- NOTICE.txt | 236 ------- ...watcher-to-no-longer-need-root-access.yaml | 32 + control_v2.proto | 4 +- go.mod | 2 - go.sum | 2 - .../application/upgrade/crash_checker.go | 177 ----- .../application/upgrade/crash_checker_test.go | 192 ------ .../application/upgrade/error_checker.go | 106 --- .../application/upgrade/rollback_darwin.go | 43 ++ .../application/upgrade/rollback_linux.go | 45 ++ .../application/upgrade/rollback_windows.go | 29 + .../pkg/agent/application/upgrade/service.go | 294 --------- .../application/upgrade/service_darwin.go | 134 ---- .../application/upgrade/service_windows.go | 70 -- .../pkg/agent/application/upgrade/watcher.go | 247 +++++++ .../agent/application/upgrade/watcher_test.go | 609 ++++++++++++++++++ internal/pkg/agent/cmd/watch.go | 25 +- internal/pkg/agent/configuration/upgrade.go | 7 - internal/pkg/agent/install/install.go | 11 + pkg/control/v1/proto/control_v1.pb.go | 4 +- pkg/control/v1/proto/control_v1_grpc.pb.go | 32 +- pkg/control/v2/client/client.go | 16 +- pkg/control/v2/client/dial.go | 12 +- pkg/control/v2/client/dial_windows.go | 12 +- pkg/control/v2/client/mocks/client.go | 48 +- pkg/control/v2/cproto/control_v2.pb.go | 346 +++++----- pkg/control/v2/cproto/control_v2_grpc.pb.go | 46 +- pkg/control/v2/server/server.go | 2 + testing/integration/upgrade_rollback_test.go | 97 ++- testing/upgradetest/upgrader.go | 20 +- testing/upgradetest/watcher.go | 2 +- 31 files changed, 1396 insertions(+), 1506 deletions(-) create mode 100644 changelog/fragments/1697554456-Improve-upgrade-watcher-to-no-longer-need-root-access.yaml delete mode 100644 internal/pkg/agent/application/upgrade/crash_checker.go delete mode 100644 internal/pkg/agent/application/upgrade/crash_checker_test.go delete mode 100644 internal/pkg/agent/application/upgrade/error_checker.go create mode 100644 internal/pkg/agent/application/upgrade/rollback_darwin.go create mode 100644 internal/pkg/agent/application/upgrade/rollback_linux.go create mode 100644 internal/pkg/agent/application/upgrade/rollback_windows.go delete mode 100644 internal/pkg/agent/application/upgrade/service.go delete mode 100644 internal/pkg/agent/application/upgrade/service_darwin.go delete mode 100644 internal/pkg/agent/application/upgrade/service_windows.go create mode 100644 internal/pkg/agent/application/upgrade/watcher.go create mode 100644 internal/pkg/agent/application/upgrade/watcher_test.go diff --git a/NOTICE.txt b/NOTICE.txt index 9bc5150e46a..2ac1bfb9926 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -204,207 +204,6 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/coreos/go-systemd/v22 -Version: v22.3.3-0.20220203105225-a9a7ef127534 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/coreos/go-systemd/v22@v22.3.3-0.20220203105225-a9a7ef127534/LICENSE: - -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. 
- -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - -------------------------------------------------------------------------------- Dependency : github.com/docker/go-units Version: v0.5.0 @@ -11391,41 +11190,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/godbus/dbus/v5 -Version: v5.0.6 -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/godbus/dbus/v5@v5.0.6/LICENSE: - -Copyright (c) 2013, Georg Reinke (), Google -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : github.com/gogo/protobuf Version: v1.3.2 diff --git a/changelog/fragments/1697554456-Improve-upgrade-watcher-to-no-longer-need-root-access.yaml b/changelog/fragments/1697554456-Improve-upgrade-watcher-to-no-longer-need-root-access.yaml new file mode 100644 index 00000000000..82c757c7d9c --- /dev/null +++ b/changelog/fragments/1697554456-Improve-upgrade-watcher-to-no-longer-need-root-access.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Improve upgrade watcher to no longer need root access + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. 
+#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/3622 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/control_v2.proto b/control_v2.proto index 06ce89bf710..a94fe8eaa9b 100644 --- a/control_v2.proto +++ b/control_v2.proto @@ -166,6 +166,8 @@ message StateAgentInfo { string buildTime = 4; // Current running version is a snapshot. bool snapshot = 5; + // Current running PID. + int32 pid = 6; } // StateResponse is the current state of Elastic Agent. @@ -214,7 +216,7 @@ enum AdditionalDiagnosticRequest { CPU = 0; } -// DiagnosticComponentsRequest is the message to request diagnostics from individual components. +// DiagnosticComponentsRequest is the message to request diagnostics from individual components. message DiagnosticComponentsRequest { repeated DiagnosticComponentRequest components = 1; repeated AdditionalDiagnosticRequest additional_metrics = 2; diff --git a/go.mod b/go.mod index 322a8ef9f3a..e6e2a1cdd67 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,6 @@ require ( github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e github.com/cenkalti/backoff/v4 v4.1.3 - github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 github.com/docker/go-units v0.5.0 github.com/dolmen-go/contextio v0.0.0-20200217195037-68fc5150bcd5 github.com/elastic/e2e-testing v1.1.0 @@ -100,7 +99,6 @@ require ( github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gobuffalo/here v0.6.0 // indirect - github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.6.9 // indirect diff --git a/go.sum b/go.sum index 87f5be315fa..41b12f0955f 100644 --- a/go.sum +++ b/go.sum @@ -701,7 +701,6 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 h1:rtAn27wIbmOGUs7RIbVgPEjb31ehTVniDwPGXyMxm5U= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -939,7 +938,6 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus 
v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= diff --git a/internal/pkg/agent/application/upgrade/crash_checker.go b/internal/pkg/agent/application/upgrade/crash_checker.go deleted file mode 100644 index 67bc2bf862c..00000000000 --- a/internal/pkg/agent/application/upgrade/crash_checker.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package upgrade - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - evaluatedPeriods = 6 // with 10s period this means we evaluate 60s of agent run - crashesAllowed = 2 // means that within 60s one restart is allowed, additional one is considered crash -) - -type serviceHandler interface { - PID(ctx context.Context) (int, error) - Name() string - Close() -} - -// CrashChecker checks agent for crash pattern in Elastic Agent lifecycle. -type CrashChecker struct { - notifyChan chan error - q *distinctQueue - log *logger.Logger - sc serviceHandler - checkInterval time.Duration -} - -// NewCrashChecker creates a new crash checker. -func NewCrashChecker(ctx context.Context, ch chan error, log *logger.Logger, checkInterval time.Duration) (*CrashChecker, error) { - q, err := newDistinctQueue(evaluatedPeriods) - if err != nil { - return nil, err - } - - c := &CrashChecker{ - notifyChan: ch, - q: q, - log: log, - checkInterval: checkInterval, - } - - if err := c.Init(ctx, log); err != nil { - return nil, err - } - - log.Infof("running checks using '%s' controller", c.sc.Name()) - - return c, nil -} - -// Run runs the checking loop. -func (ch *CrashChecker) Run(ctx context.Context) { - defer ch.sc.Close() - - ch.log.Info("Crash checker started") - for { - t := time.NewTimer(ch.checkInterval) - - select { - case <-ctx.Done(): - t.Stop() - return - case <-t.C: - pid, err := ch.sc.PID(ctx) - if err != nil { - ch.log.Error(err) - } - - ch.log.Infof("retrieved service PID [%d]", pid) - ch.q.Push(pid) - - // We decide if the Agent process has crashed in either of - // these two ways. - ch.checkNotRunning() - ch.checkRestarted() - } - } -} - -// checkNotRunning checks if the PID reported for the Agent process has -// remained 0 for most recent crashesAllowed times the PID was checked. -// If so, it decides that the service has crashed. -func (ch *CrashChecker) checkNotRunning() { - // If PID has remained 0 for the most recent crashesAllowed number of checks, - // we consider the Agent as having crashed. 
- if ch.q.Len() < crashesAllowed { - // Not enough history of PIDs yet - return - } - - recentPIDs := ch.q.Peek(crashesAllowed) - ch.log.Debugf("most recent %d service PIDs within %d evaulations: %v", crashesAllowed, evaluatedPeriods, recentPIDs) - - allZeroPIDs := true - for _, recentPID := range recentPIDs { - allZeroPIDs = allZeroPIDs && (recentPID == 0) - } - - if allZeroPIDs { - msg := fmt.Sprintf("service remained crashed (PID = 0) within '%v' seconds", ch.checkInterval.Seconds()) - ch.notifyChan <- errors.New(msg) - } -} - -// checkRestarted checks if the PID reported for the Agent process has -// changed more than crashesAllowed times. If so, it decides that the service -// has crashed. -func (ch *CrashChecker) checkRestarted() { - restarts := ch.q.Distinct() - ch.log.Debugf("service PID changed %d times within %d evaluations", restarts, evaluatedPeriods) - - if restarts > crashesAllowed { - msg := fmt.Sprintf("service restarted '%d' times within '%v' seconds", restarts, ch.checkInterval.Seconds()) - ch.notifyChan <- errors.New(msg) - } -} - -type distinctQueue struct { - q []int - size int - lock sync.Mutex -} - -func newDistinctQueue(size int) (*distinctQueue, error) { - if size < 1 { - return nil, errors.New("invalid size", errors.TypeUnexpected) - } - return &distinctQueue{ - q: make([]int, 0, size), - size: size, - }, nil -} - -func (dq *distinctQueue) Push(id int) { - dq.lock.Lock() - defer dq.lock.Unlock() - - cutIdx := len(dq.q) - if dq.size-1 < len(dq.q) { - cutIdx = dq.size - 1 - } - dq.q = append([]int{id}, dq.q[:cutIdx]...) -} - -func (dq *distinctQueue) Distinct() int { - dq.lock.Lock() - defer dq.lock.Unlock() - - dm := make(map[int]int) - - for _, id := range dq.q { - dm[id] = 1 - } - - return len(dm) -} - -func (dq *distinctQueue) Len() int { - return len(dq.q) -} - -func (dq *distinctQueue) Peek(size int) []int { - if size > len(dq.q) { - size = len(dq.q) - } - - return dq.q[:size] -} diff --git a/internal/pkg/agent/application/upgrade/crash_checker_test.go b/internal/pkg/agent/application/upgrade/crash_checker_test.go deleted file mode 100644 index 0b62f3b99a8..00000000000 --- a/internal/pkg/agent/application/upgrade/crash_checker_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package upgrade - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -var ( - testCheckPeriod = 100 * time.Millisecond -) - -func TestChecker(t *testing.T) { - t.Run("no failure when no change", func(t *testing.T) { - pider := &testPider{pid: 111} - ch, errChan := testableChecker(t, pider) - ctx, cancel := context.WithCancel(context.Background()) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - wg.Done() - ch.Run(ctx) - }() - - wg.Wait() - <-time.After(6 * testCheckPeriod) - - var err error - select { - case err = <-errChan: - default: - } - - cancel() - require.NoError(t, err) - }) - - t.Run("no failure when unfrequent change", func(t *testing.T) { - const startingPID = 222 - pider := &testPider{pid: startingPID} - ch, errChan := testableChecker(t, pider) - ctx, cancel := context.WithCancel(context.Background()) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - wg.Done() - ch.Run(ctx) - }() - - wg.Wait() - for i := 0; i < 2; i++ { - <-time.After(3 * testCheckPeriod) - pider.Change(startingPID + i) - } - var err error - select { - case err = <-errChan: - default: - } - - cancel() - require.NoError(t, err) - }) - - t.Run("no failure when change lower than limit", func(t *testing.T) { - const startingPID = 333 - pider := &testPider{pid: startingPID} - ch, errChan := testableChecker(t, pider) - ctx, cancel := context.WithCancel(context.Background()) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - wg.Done() - ch.Run(ctx) - }() - - wg.Wait() - for i := 0; i < 3; i++ { - <-time.After(7 * testCheckPeriod) - pider.Change(startingPID + i) - } - var err error - select { - case err = <-errChan: - default: - } - - cancel() - require.NoError(t, err) - }) - - t.Run("fails when pid changes frequently", func(t *testing.T) { - pider := &testPider{} - ch, errChan := testableChecker(t, pider) - ctx, cancel := context.WithCancel(context.Background()) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - wg.Done() - ch.Run(ctx) - }() - - wg.Wait() - for i := 0; i < 12; i++ { - <-time.After(testCheckPeriod / 2) - pider.Change(i) - } - var err error - select { - case err = <-errChan: - default: - } - - cancel() - assert.ErrorContains(t, err, "service restarted '3' times within '0.1' seconds") - }) - - t.Run("fails when pid remains 0", func(t *testing.T) { - const startingPID = 0 - pider := &testPider{pid: startingPID} - ch, errChan := testableChecker(t, pider) - ctx, cancel := context.WithCancel(context.Background()) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - wg.Done() - ch.Run(ctx) - }() - - wg.Wait() - for i := 0; i < 3; i++ { - <-time.After(testCheckPeriod * 3) - pider.Change(startingPID) // don't change PID - } - var err error - select { - case err = <-errChan: - default: - } - - cancel() - assert.ErrorContains(t, err, "service remained crashed (PID = 0) within '0.1' seconds") - }) -} - -func testableChecker(t *testing.T, pider *testPider) (*CrashChecker, chan error) { - errChan := make(chan error, 1) - l, _ := logger.New("", false) - ch, err := NewCrashChecker(context.Background(), errChan, l, testCheckPeriod) - require.NoError(t, err) - - ch.sc.Close() - ch.sc = pider - - return ch, errChan -} - -type testPider struct { - sync.Mutex - pid int -} - -func (p *testPider) Change(pid int) { - p.Lock() - defer p.Unlock() - p.pid = pid -} - -func (p *testPider) PID(ctx context.Context) (int, error) { - p.Lock() - defer p.Unlock() - return p.pid, 
nil -} - -func (p *testPider) Close() {} - -func (p *testPider) Name() string { return "testPider" } diff --git a/internal/pkg/agent/application/upgrade/error_checker.go b/internal/pkg/agent/application/upgrade/error_checker.go deleted file mode 100644 index a0a3516c94a..00000000000 --- a/internal/pkg/agent/application/upgrade/error_checker.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package upgrade - -import ( - "context" - "fmt" - "time" - - "github.com/elastic/elastic-agent/pkg/control" - "github.com/elastic/elastic-agent/pkg/control/v2/client" - - "github.com/hashicorp/go-multierror" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - statusCheckMissesAllowed = 4 // enable 2 minute start -) - -// ErrAgentStatusFailed is returned when agent reports FAILED status. -var ErrAgentStatusFailed = errors.New("agent in a failed state", errors.TypeApplication) - -// ErrorChecker checks agent for status change and sends an error to a channel if found. -type ErrorChecker struct { - failuresCounter int - notifyChan chan error - log *logger.Logger - agentClient client.Client - checkInterval time.Duration -} - -// NewErrorChecker creates a new error checker. -func NewErrorChecker(ch chan error, log *logger.Logger, checkInterval time.Duration) (*ErrorChecker, error) { - c := client.New() - ec := &ErrorChecker{ - notifyChan: ch, - agentClient: c, - log: log, - checkInterval: checkInterval, - } - - return ec, nil -} - -// Run runs the checking loop. -func (ch *ErrorChecker) Run(ctx context.Context) { - ch.log.Info("Error checker started") - for { - t := time.NewTimer(ch.checkInterval) - select { - case <-ctx.Done(): - t.Stop() - return - case <-t.C: - err := ch.agentClient.Connect(ctx) - if err != nil { - ch.failuresCounter++ - ch.log.Error(err, "Failed communicating to running daemon", errors.TypeNetwork, errors.M("socket", control.Address())) - ch.checkFailures() - - continue - } - - state, err := ch.agentClient.State(ctx) - ch.agentClient.Disconnect() - if err != nil { - ch.log.Error("failed retrieving agent status", err) - ch.failuresCounter++ - ch.checkFailures() - - // agent is probably not running and this will be detected by pid watcher - continue - } - - // call was successful, reset counter - ch.failuresCounter = 0 - - if state.State == client.Failed { - ch.log.Error("error checker notifying failure of agent") - ch.notifyChan <- ErrAgentStatusFailed - } - - for _, comp := range state.Components { - if comp.State == client.Failed { - err = multierror.Append(err, errors.New(fmt.Sprintf("component %s[%v] failed: %s", comp.Name, comp.ID, comp.Message))) - } - } - - if err != nil { - ch.log.Error("error checker notifying failure of applications") - ch.notifyChan <- errors.New(err, "applications in a failed state", errors.TypeApplication) - } - } - } -} - -func (ch *ErrorChecker) checkFailures() { - if failures := ch.failuresCounter; failures > statusCheckMissesAllowed { - ch.notifyChan <- errors.New(fmt.Sprintf("service failed to fetch agent status '%d' times in a row", failures)) - } -} diff --git a/internal/pkg/agent/application/upgrade/rollback_darwin.go b/internal/pkg/agent/application/upgrade/rollback_darwin.go new file mode 100644 index 00000000000..5ab2ab4cd01 --- /dev/null +++ 
b/internal/pkg/agent/application/upgrade/rollback_darwin.go @@ -0,0 +1,43 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build darwin + +package upgrade + +import ( + "os" + "os/exec" + "syscall" + "time" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" +) + +const ( + // delay after agent restart is performed to allow agent to tear down all the processes + // important mainly for windows, as it prevents removing files which are in use + afterRestartDelay = 2 * time.Second +) + +func invokeCmd() *exec.Cmd { + // #nosec G204 -- user cannot inject any parameters to this command + cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, + "--path.config", paths.Config(), + "--path.home", paths.Top(), + ) + + var cred = &syscall.Credential{ + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + Groups: nil, + NoSetGroups: true, + } + var sysproc = &syscall.SysProcAttr{ + Credential: cred, + Setsid: true, + } + cmd.SysProcAttr = sysproc + return cmd +} diff --git a/internal/pkg/agent/application/upgrade/rollback_linux.go b/internal/pkg/agent/application/upgrade/rollback_linux.go new file mode 100644 index 00000000000..934e3953fa0 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/rollback_linux.go @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build linux + +package upgrade + +import ( + "os" + "os/exec" + "syscall" + "time" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" +) + +const ( + // delay after agent restart is performed to allow agent to tear down all the processes + // important mainly for windows, as it prevents removing files which are in use + afterRestartDelay = 2 * time.Second +) + +func invokeCmd() *exec.Cmd { + // #nosec G204 -- user cannot inject any parameters to this command + cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, + "--path.config", paths.Config(), + "--path.home", paths.Top(), + ) + + var cred = &syscall.Credential{ + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + Groups: nil, + NoSetGroups: true, + } + var sysproc = &syscall.SysProcAttr{ + Credential: cred, + Setsid: true, + // propagate sigint instead of sigkill so we can ignore it + Pdeathsig: syscall.SIGINT, + } + cmd.SysProcAttr = sysproc + return cmd +} diff --git a/internal/pkg/agent/application/upgrade/rollback_windows.go b/internal/pkg/agent/application/upgrade/rollback_windows.go new file mode 100644 index 00000000000..2315202e770 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/rollback_windows.go @@ -0,0 +1,29 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build windows + +package upgrade + +import ( + "os/exec" + "time" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" +) + +const ( + // delay after agent restart is performed to allow agent to tear down all the processes + // important mainly for windows, as it prevents removing files which are in use + afterRestartDelay = 15 * time.Second +) + +func invokeCmd() *exec.Cmd { + // #nosec G204 -- user cannot inject any parameters to this command + cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, + "--path.config", paths.Config(), + "--path.home", paths.Top(), + ) + return cmd +} diff --git a/internal/pkg/agent/application/upgrade/service.go b/internal/pkg/agent/application/upgrade/service.go deleted file mode 100644 index cfb01945017..00000000000 --- a/internal/pkg/agent/application/upgrade/service.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build !darwin && !windows - -package upgrade - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" - "syscall" - "time" - - "github.com/coreos/go-systemd/v22/dbus" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - // delay after agent restart is performed to allow agent to tear down all the processes - // important mainly for windows, as it prevents removing files which are in use - afterRestartDelay = 2 * time.Second -) - -type pidProvider interface { - Init() error - Close() - PID(ctx context.Context) (int, error) - Name() string -} - -// Init initializes os dependent properties. 
-func (ch *CrashChecker) Init(ctx context.Context, _ *logger.Logger) error { - pp := relevantPidProvider() - if err := pp.Init(); err != nil { - return fmt.Errorf("unable to initialize relevant PID provider: %w", err) - } - - ch.sc = pp - - return nil -} - -func relevantPidProvider() pidProvider { - var pp pidProvider - - switch { - case isSystemd(): - pp = &dbusPidProvider{} - case isUpstart(): - pp = &upstartPidProvider{} - case isSysv(): - pp = &sysvPidProvider{} - default: - // in case we're using unsupported service manager - // let other checks work - pp = &noopPidProvider{} - } - - return pp -} - -// Upstart PID Provider - -type upstartPidProvider struct{} - -func (p *upstartPidProvider) Init() error { return nil } - -func (p *upstartPidProvider) Close() {} - -func (p *upstartPidProvider) Name() string { return "Upstart" } - -func (p *upstartPidProvider) PID(ctx context.Context) (int, error) { - listCmd := exec.Command("/sbin/status", agentName) - out, err := listCmd.Output() - if err != nil { - return 0, errors.New("failed to read process id", err) - } - - // find line - pidLine := strings.TrimSpace(string(out)) - if pidLine == "" { - return 0, errors.New(fmt.Sprintf("service process not found for service '%v'", paths.ServiceName)) - } - - re := regexp.MustCompile(agentName + ` start/running, process ([0-9]+)`) - matches := re.FindStringSubmatch(pidLine) - if len(matches) != 2 { - return 0, errors.New("could not detect pid of process", pidLine, matches) - } - - pid, err := strconv.Atoi(matches[1]) - if err != nil { - return 0, errors.New(fmt.Sprintf("failed to get process id[%v]", matches[1]), err) - } - - return pid, nil -} - -// SYSV PID Provider - -type sysvPidProvider struct{} - -func (p *sysvPidProvider) Init() error { return nil } - -func (p *sysvPidProvider) Close() {} - -func (p *sysvPidProvider) Name() string { return "SysV" } - -func (p *sysvPidProvider) PID(ctx context.Context) (int, error) { - listCmd := exec.Command("service", agentName, "status") - out, err := listCmd.Output() - if err != nil { - return 0, errors.New("failed to read process id", err) - } - - // find line - statusLine := strings.TrimSpace(string(out)) - if statusLine == "" { - return 0, errors.New(fmt.Sprintf("service process not found for service '%v'", paths.ServiceName)) - } - - // sysv does not report pid, let's do best effort - if !strings.HasPrefix(statusLine, "Running") { - return 0, errors.New(fmt.Sprintf("'%v' is not running", paths.ServiceName)) - } - - cmdArgs := filepath.Join(paths.Top(), paths.BinaryName) - pidofLine, err := exec.Command("pidof", cmdArgs).Output() - if err != nil { - return 0, errors.New(fmt.Sprintf("PID not found for'%v': %v", paths.ServiceName, err)) - } - - pid, err := strconv.Atoi(strings.TrimSpace(string(pidofLine))) - if err != nil { - return 0, errors.New("PID not a number") - } - - return pid, nil -} - -// DBUS PID provider - -type dbusPidProvider struct { - dbusConn *dbus.Conn -} - -func (p *dbusPidProvider) Init() error { - dbusConn, err := dbus.NewWithContext(context.Background()) - if err != nil { - return errors.New("failed to create dbus connection", err) - } - - p.dbusConn = dbusConn - return nil -} - -func (p *dbusPidProvider) Name() string { return "DBus" } - -func (p *dbusPidProvider) Close() { - p.dbusConn.Close() -} - -func (p *dbusPidProvider) PID(ctx context.Context) (int, error) { - sn := paths.ServiceName - if !strings.HasSuffix(sn, ".service") { - sn += ".service" - } - - prop, err := p.dbusConn.GetServicePropertyContext(ctx, sn, "MainPID") - if 
err != nil { - return 0, errors.New("failed to read service", err) - } - - pid, ok := prop.Value.Value().(uint32) - if !ok { - return 0, errors.New("failed to get process id", prop.Value.Value()) - } - - return int(pid), nil -} - -// noop PID provider - -type noopPidProvider struct{} - -func (p *noopPidProvider) Init() error { return nil } - -func (p *noopPidProvider) Close() {} - -func (p *noopPidProvider) Name() string { return "noop" } - -func (p *noopPidProvider) PID(ctx context.Context) (int, error) { return 0, nil } - -func invokeCmd() *exec.Cmd { - // #nosec G204 -- user cannot inject any parameters to this command - cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, - "--path.config", paths.Config(), - "--path.home", paths.Top(), - ) - - var cred = &syscall.Credential{ - Uid: uint32(os.Getuid()), - Gid: uint32(os.Getgid()), - Groups: nil, - NoSetGroups: true, - } - var sysproc = &syscall.SysProcAttr{ - Credential: cred, - Setsid: true, - // propagate sigint instead of sigkill so we can ignore it - Pdeathsig: syscall.SIGINT, - } - cmd.SysProcAttr = sysproc - return cmd -} - -func isSystemd() bool { - if _, err := os.Stat("/run/systemd/system"); err == nil { - return true - } - - if _, err := os.Stat("/proc/1/comm"); err == nil { - filerc, err := os.Open("/proc/1/comm") - if err != nil { - return false - } - defer filerc.Close() - - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(filerc); err != nil { - return false - } - contents := buf.String() - - if strings.Trim(contents, " \r\n") == "systemd" { - return true - } - } - return false -} - -func isUpstart() bool { - if _, err := os.Stat("/sbin/upstart-udev-bridge"); err == nil { - return true - } - - if _, err := os.Stat("/sbin/initctl"); err == nil { - if out, err := exec.Command("/sbin/initctl", "--version").Output(); err == nil { - if bytes.Contains(out, []byte("initctl (upstart")) { - return true - } - } - } - return false -} - -func isSysv() bool { - // PID 1 is init - out, err := exec.Command("sudo", "cat", "/proc/1/comm").Output() - if err != nil { - o, err := exec.Command("cat", "/proc/1/comm").Output() - if err != nil { - return false - } - out = o - } - - if strings.TrimSpace(string(out)) != "init" { - return false - } - - // /sbin/init is not a link - initFile, err := os.Open("/sbin/init") - if err != nil || initFile == nil { - return false - } - - fi, err := initFile.Stat() - if err != nil { - return false - } - return fi.Mode()&os.ModeSymlink != os.ModeSymlink -} diff --git a/internal/pkg/agent/application/upgrade/service_darwin.go b/internal/pkg/agent/application/upgrade/service_darwin.go deleted file mode 100644 index 8886e7b414e..00000000000 --- a/internal/pkg/agent/application/upgrade/service_darwin.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -//go:build darwin - -package upgrade - -import ( - "bufio" - "bytes" - "context" - "fmt" - "os" - "os/exec" - "regexp" - "strconv" - "strings" - "syscall" - "time" - - "github.com/hashicorp/go-multierror" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - // delay after agent restart is performed to allow agent to tear down all the processes - // important mainly for windows, as it prevents removing files which are in use - afterRestartDelay = 2 * time.Second -) - -// Init initializes os dependent properties. -func (ch *CrashChecker) Init(ctx context.Context, _ *logger.Logger) error { - ch.sc = &darwinPidProvider{} - - return nil -} - -type darwinPidProvider struct{} - -func (p *darwinPidProvider) Name() string { return "launchd" } - -func (p *darwinPidProvider) Close() {} - -func (p *darwinPidProvider) PID(ctx context.Context) (int, error) { - piders := []func(context.Context) (int, error){ - p.piderFromCmd("launchctl", "list", paths.ServiceName), - } - - // if release is specifically built to be upgradeable (using DEV flag) - // we dont require to run as a service and will need sudo fallback - if release.Upgradeable() { - piders = append(piders, p.piderFromCmd("sudo", "launchctl", "list", paths.ServiceName)) - } - - var pidErrors error - for _, pider := range piders { - pid, err := pider(ctx) - if err == nil { - return pid, nil - } - - pidErrors = multierror.Append(pidErrors, err) - } - - return 0, pidErrors -} - -func (p *darwinPidProvider) piderFromCmd(name string, args ...string) func(context.Context) (int, error) { - return func(context.Context) (int, error) { - listCmd := exec.Command(name, args...) 
- listCmd.SysProcAttr = &syscall.SysProcAttr{ - Credential: &syscall.Credential{Uid: 0, Gid: 0}, - } - out, err := listCmd.Output() - if err != nil { - return 0, errors.New("failed to read process id", err) - } - - // find line - pidLine := "" - reader := bufio.NewReader(bytes.NewReader(out)) - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - line := scanner.Text() - if strings.Contains(line, `"PID" = `) { - pidLine = strings.TrimSpace(line) - break - } - } - - if pidLine == "" { - return 0, errors.New(fmt.Sprintf("service process not found for service '%v'", paths.ServiceName)) - } - - re := regexp.MustCompile(`"PID" = ([0-9]+);`) - matches := re.FindStringSubmatch(pidLine) - if len(matches) != 2 { - return 0, errors.New("could not detect pid of process", pidLine, matches) - } - - pid, err := strconv.Atoi(matches[1]) - if err != nil { - return 0, errors.New(fmt.Sprintf("failed to get process id[%v]", matches[1]), err) - } - - return pid, nil - } -} - -func invokeCmd() *exec.Cmd { - // #nosec G204 -- user cannot inject any parameters to this command - cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, - "--path.config", paths.Config(), - "--path.home", paths.Top(), - ) - - var cred = &syscall.Credential{ - Uid: uint32(os.Getuid()), - Gid: uint32(os.Getgid()), - Groups: nil, - NoSetGroups: true, - } - var sysproc = &syscall.SysProcAttr{ - Credential: cred, - Setsid: true, - } - cmd.SysProcAttr = sysproc - return cmd -} diff --git a/internal/pkg/agent/application/upgrade/service_windows.go b/internal/pkg/agent/application/upgrade/service_windows.go deleted file mode 100644 index 52512a1843b..00000000000 --- a/internal/pkg/agent/application/upgrade/service_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build windows - -package upgrade - -import ( - "context" - "os/exec" - "time" - - "golang.org/x/sys/windows/svc/mgr" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - // delay after agent restart is performed to allow agent to tear down all the processes - // important mainly for windows, as it prevents removing files which are in use - afterRestartDelay = 15 * time.Second -) - -// Init initializes os dependent properties. 
-func (ch *CrashChecker) Init(ctx context.Context, _ *logger.Logger) error { - mgr, err := mgr.Connect() - if err != nil { - return errors.New("failed to initiate service manager", err) - } - - ch.sc = &pidProvider{ - winManager: mgr, - } - - return nil -} - -type pidProvider struct { - winManager *mgr.Mgr -} - -func (p *pidProvider) Close() {} - -func (p *pidProvider) Name() string { return "Windows Service Manager" } - -func (p *pidProvider) PID(ctx context.Context) (int, error) { - svc, err := p.winManager.OpenService(paths.ServiceName) - if err != nil { - return 0, errors.New("failed to read windows service", err) - } - - status, err := svc.Query() - if err != nil { - return 0, errors.New("failed to read windows service PID: %v", err) - } - - return int(status.ProcessId), nil -} - -func invokeCmd() *exec.Cmd { - // #nosec G204 -- user cannot inject any parameters to this command - cmd := exec.Command(paths.TopBinaryPath(), watcherSubcommand, - "--path.config", paths.Config(), - "--path.home", paths.Top(), - ) - return cmd -} diff --git a/internal/pkg/agent/application/upgrade/watcher.go b/internal/pkg/agent/application/upgrade/watcher.go new file mode 100644 index 00000000000..637129f82dc --- /dev/null +++ b/internal/pkg/agent/application/upgrade/watcher.go @@ -0,0 +1,247 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package upgrade + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/hashicorp/go-multierror" + "google.golang.org/grpc" + + "github.com/elastic/elastic-agent/pkg/control/v2/client" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +const ( + statusCheckMissesAllowed = 4 // enable 2 minute start (30 second periods) + statusLossesAllowed = 2 // enable connection lost to agent twice + statusFailureFlipFlopsAllowed = 3 // no more than three failure flip-flops allowed +) + +var ( + // ErrCannotConnect is returned when connection cannot be made to the agent. + ErrCannotConnect = errors.New("failed to connect to agent daemon") + // ErrLostConnection is returned when connection is lost to the agent daemon. + ErrLostConnection = errors.New("lost connection to agent daemon") + // ErrAgentStatusFailed is returned when agent reports FAILED status. + ErrAgentStatusFailed = errors.New("agent reported failed state") + // ErrAgentComponentFailed is returned when agent reports FAILED status for a component + ErrAgentComponentFailed = errors.New("agent reported failed component(s) state") + // ErrAgentFlipFlopFailed is returned when agent flip-flops between failed and healthy. + ErrAgentFlipFlopFailed = errors.New("agent reported on and off failures ") +) + +// AgentWatcher watches for the ability to connect to the running Elastic Agent, if it reports any errors +// and how many times it disconnects from the Elastic Agent while running. +type AgentWatcher struct { + connectCounter int + lostCounter int + lastPid int32 + + notifyChan chan error + log *logger.Logger + agentClient client.Client + checkInterval time.Duration +} + +// NewAgentWatcher creates a new agent watcher. +func NewAgentWatcher(ch chan error, log *logger.Logger, checkInterval time.Duration) *AgentWatcher { + c := client.New() + ec := &AgentWatcher{ + notifyChan: ch, + agentClient: c, + log: log, + checkInterval: checkInterval, + } + return ec +} + +// Run runs the checking loop. 
+func (ch *AgentWatcher) Run(ctx context.Context) { + ch.log.Info("Agent watcher started") + + ch.connectCounter = 0 + ch.lostCounter = 0 + + // tracking of an error runs in a separate goroutine, because + // the call to `watch.Recv` blocks and a timer is needed + // to determine if an error last longer than the checkInterval. + failedReset := make(chan bool) + failedCh := make(chan error) + go func() { + failedTimer := time.NewTimer(ch.checkInterval) + failedTimer.Stop() // starts stopped + defer failedTimer.Stop() // stopped on exit always + + var flipFlopCount int + var failedErr error + for { + select { + case <-ctx.Done(): + return + case reset := <-failedReset: + if reset { + flipFlopCount = 0 + failedTimer.Stop() + } + case err := <-failedCh: + if err != nil { + if failedErr == nil { + flipFlopCount++ + failedTimer.Reset(ch.checkInterval) + ch.log.Error("Agent reported failure (starting failed timer): %s", err) + } else { + ch.log.Error("Agent reported failure (failed timer already started): %s", err) + } + } else { + if failedErr != nil { + failedTimer.Stop() + ch.log.Error("Agent reported healthy (failed timer stopped): %s", err) + } + } + failedErr = err + if flipFlopCount > statusFailureFlipFlopsAllowed { + err := fmt.Errorf("%w '%d' times in a row", ErrAgentFlipFlopFailed, flipFlopCount) + ch.log.Error(err) + ch.notifyChan <- err + } + case <-failedTimer.C: + if failedErr == nil { + // error was cleared; do nothing + continue + } + // error lasted longer than the checkInterval, notify! + ch.notifyChan <- failedErr + } + } + }() + +LOOP: + for { + ch.lastPid = -1 + connectTimer := time.NewTimer(ch.checkInterval) + select { + case <-ctx.Done(): + connectTimer.Stop() + return + case <-connectTimer.C: + ch.log.Info("Trying to connect to agent") + // block on connection, don't retry connection, and fail on temp dial errors + // always a local connection it should connect quickly so the timeout is only 1 second + connectCtx, connectCancel := context.WithTimeout(ctx, 1*time.Second) + err := ch.agentClient.Connect(connectCtx, grpc.WithBlock(), grpc.WithDisableRetry(), grpc.FailOnNonTempDialError(true)) + connectCancel() + if err != nil { + ch.connectCounter++ + ch.log.Error("Failed connecting to running daemon: ", err) + if ch.checkFailures() { + return + } + // agent is probably not running + continue + } + + stateCtx, stateCancel := context.WithCancel(ctx) + watch, err := ch.agentClient.StateWatch(stateCtx) + if err != nil { + // considered a connect error + stateCancel() + ch.agentClient.Disconnect() + ch.log.Error("Failed to start state watch: ", err) + ch.connectCounter++ + if ch.checkFailures() { + return + } + // agent is probably not running + continue + } + + ch.log.Info("Connected to agent") + + // clear the connectCounter as connection was successfully made + // we don't want a disconnect and a reconnect to be counted with + // the connectCounter that is tracked with the lostCounter + ch.connectCounter = 0 + + // failure is tracked only for the life of the connection to + // the watch streaming protocol. 
either an error that last longer + // than the checkInterval or to many flopping of error/non-error + // will trigger a reported failure + failedReset <- true + failedCh <- nil + + for { + state, err := watch.Recv() + if err != nil { + // agent has crashed or exited + stateCancel() + ch.agentClient.Disconnect() + ch.log.Error("Lost connection: failed reading next state: ", err) + ch.lostCounter++ + if ch.checkFailures() { + return + } + continue LOOP + } + + // gRPC is good at hiding the fact that connection was lost + // to ensure that we don't miss a restart a changed PID means + // we are now talking to a different spawned Elastic Agent + if ch.lastPid == -1 { + ch.lastPid = state.Info.PID + ch.log.Info(fmt.Sprintf("Communicating with PID %d", ch.lastPid)) + } else if ch.lastPid != state.Info.PID { + ch.log.Error(fmt.Sprintf("Communication with PID %d lost, now communicating with PID %d", ch.lastPid, state.Info.PID)) + ch.lastPid = state.Info.PID + // count the PID change as a lost connection, but allow + // the communication to continue unless has become a failure + ch.lostCounter++ + if ch.checkFailures() { + stateCancel() + ch.agentClient.Disconnect() + return + } + } + + if state.State == client.Failed { + // top-level failure (something is really wrong) + failedCh <- fmt.Errorf("%w: %s", ErrAgentStatusFailed, state.Message) + continue + } else { + // agent is healthy; but a component might not be healthy + // upgrade tracks unhealthy component as an issue with the upgrade + var compErr error + for _, comp := range state.Components { + if comp.State == client.Failed { + compErr = multierror.Append(compErr, fmt.Errorf("component %s[%v] failed: %s", comp.Name, comp.ID, comp.Message)) + } + } + if compErr != nil { + failedCh <- fmt.Errorf("%w: %w", ErrAgentComponentFailed, compErr) + continue + } + } + + // nothing is failed + failedCh <- nil + } + } + } +} + +func (ch *AgentWatcher) checkFailures() bool { + if failures := ch.connectCounter; failures > statusCheckMissesAllowed { + ch.notifyChan <- fmt.Errorf("%w '%d' times in a row", ErrCannotConnect, failures) + return true + } + if failures := ch.lostCounter; failures > statusLossesAllowed { + ch.notifyChan <- fmt.Errorf("%w '%d' times in a row", ErrLostConnection, failures) + return true + } + return false +} diff --git a/internal/pkg/agent/application/upgrade/watcher_test.go b/internal/pkg/agent/application/upgrade/watcher_test.go new file mode 100644 index 00000000000..689a051d420 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/watcher_test.go @@ -0,0 +1,609 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package upgrade + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/elastic/elastic-agent/pkg/control/v2/client" + "github.com/elastic/elastic-agent/pkg/control/v2/cproto" + + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +func TestWatcher_CannotConnect(t *testing.T) { + // timeout ensures that if it doesn't work; it doesn't block forever + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 1*time.Millisecond) + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + case err := <-errCh: + assert.ErrorIs(t, err, ErrCannotConnect) + } +} + +func TestWatcher_LostConnection(t *testing.T) { + // timeout ensures that if it doesn't work; it doesn't block forever + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 1*time.Millisecond) + + // error on watch (counts as lost connect) + mockHandler := func(srv cproto.ElasticAgentControl_StateWatchServer) error { + return fmt.Errorf("forced error") + } + mock := &mockDaemon{watch: mockHandler} + require.NoError(t, mock.Start()) + defer mock.Stop() + + // set client to mock; before running + w.agentClient = mock.Client() + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + case err := <-errCh: + assert.ErrorIs(t, err, ErrLostConnection) + } +} + +func TestWatcher_PIDChange(t *testing.T) { + // timeout ensures that if it doesn't work; it doesn't block forever + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 1*time.Millisecond) + + // error on watch (counts as lost connect) + mockHandler := func(srv cproto.ElasticAgentControl_StateWatchServer) error { + // starts with PID 1 + err := srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 1, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // now with PID 2 + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 2, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // now with PID 3 + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 3, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // now with PID 4 + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 4, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // keep open until end (exiting will count as a lost connection) + <-ctx.Done() + return nil + } + mock := &mockDaemon{watch: mockHandler} + require.NoError(t, mock.Start()) + defer mock.Stop() + + // set client to mock; before running + w.agentClient = mock.Client() + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + case err := <-errCh: + assert.ErrorIs(t, err, ErrLostConnection) + } +} + +func TestWatcher_PIDChangeSuccess(t *testing.T) { + // test tests for success, which only happens when no error comes in + // during this time period + 
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 1*time.Millisecond) + + // error on watch (counts as lost connect) + mockHandler := func(srv cproto.ElasticAgentControl_StateWatchServer) error { + // starts with PID 1 + err := srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 1, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // now with PID 2 + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 2, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // now with PID 3 + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 3, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // still with PID 3 + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 3, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // still with PID 3 + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Pid: 3, + }, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // keep open until end (exiting will count as a lost connection) + <-ctx.Done() + return nil + } + mock := &mockDaemon{watch: mockHandler} + require.NoError(t, mock.Start()) + defer mock.Stop() + + // set client to mock; before running + w.agentClient = mock.Client() + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.ErrorIs(t, ctx.Err(), context.DeadlineExceeded) + case err := <-errCh: + assert.NoError(t, err, "error should not have been reported") + } +} + +func TestWatcher_AgentError(t *testing.T) { + // timeout ensures that if it doesn't work; it doesn't block forever + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 100*time.Millisecond) + + // reports only an error state, triggers failed + mockHandler := func(srv cproto.ElasticAgentControl_StateWatchServer) error { + err := srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_FAILED, + Message: "force failure", + }) + if err != nil { + return err + } + // keep open until end (exiting will count as a lost connection) + <-ctx.Done() + return nil + } + mock := &mockDaemon{watch: mockHandler} + require.NoError(t, mock.Start()) + defer mock.Stop() + + // set client to mock; before running + w.agentClient = mock.Client() + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + case err := <-errCh: + assert.ErrorIs(t, err, ErrAgentStatusFailed) + } +} + +func TestWatcher_AgentErrorQuick(t *testing.T) { + // test tests for success, which only happens when no error comes in + // during this time period + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 100*time.Millisecond) + + // reports an error state, followed by a healthy state (should not error) + mockHandler := func(srv cproto.ElasticAgentControl_StateWatchServer) error { + err := srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_FAILED, + Message: 
"force failure", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // keep open until end (exiting will count as a lost connection) + <-ctx.Done() + return nil + } + mock := &mockDaemon{watch: mockHandler} + require.NoError(t, mock.Start()) + defer mock.Stop() + + // set client to mock; before running + w.agentClient = mock.Client() + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.ErrorIs(t, ctx.Err(), context.DeadlineExceeded) + case err := <-errCh: + assert.NoError(t, err, "error should not have been reported") + } +} + +func TestWatcher_ComponentError(t *testing.T) { + // timeout ensures that if it doesn't work; it doesn't block forever + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 100*time.Millisecond) + + // reports only an error state, triggers failed + mockHandler := func(srv cproto.ElasticAgentControl_StateWatchServer) error { + err := srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + Components: []*cproto.ComponentState{ + { + Id: "component-0", + Name: "component-0", + State: cproto.State_HEALTHY, + Message: "healthy", + }, + { + Id: "component-1", + Name: "component-1", + State: cproto.State_FAILED, + Message: "force error", + }, + { + Id: "component-2", + Name: "component-2", + State: cproto.State_HEALTHY, + Message: "healthy", + }, + }, + }) + if err != nil { + return err + } + // keep open until end (exiting will count as a lost connection) + <-ctx.Done() + return nil + } + mock := &mockDaemon{watch: mockHandler} + require.NoError(t, mock.Start()) + defer mock.Stop() + + // set client to mock; before running + w.agentClient = mock.Client() + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + case err := <-errCh: + assert.ErrorIs(t, err, ErrAgentComponentFailed) + } +} + +func TestWatcher_ComponentErrorQuick(t *testing.T) { + // test tests for success, which only happens when no error comes in + // during this time period + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 100*time.Millisecond) + + // reports an error state, followed by a healthy state (should not error) + mockHandler := func(srv cproto.ElasticAgentControl_StateWatchServer) error { + err := srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + Components: []*cproto.ComponentState{ + { + Id: "component-0", + Name: "component-0", + State: cproto.State_HEALTHY, + Message: "healthy", + }, + { + Id: "component-1", + Name: "component-1", + State: cproto.State_FAILED, + Message: "force error", + }, + { + Id: "component-2", + Name: "component-2", + State: cproto.State_HEALTHY, + Message: "healthy", + }, + }, + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + Components: []*cproto.ComponentState{ + { + Id: "component-0", + Name: "component-0", + State: cproto.State_HEALTHY, + Message: "healthy", + }, + { + Id: "component-1", + Name: "component-1", + State: 
cproto.State_HEALTHY, + Message: "healthy", + }, + { + Id: "component-2", + Name: "component-2", + State: cproto.State_HEALTHY, + Message: "healthy", + }, + }, + }) + if err != nil { + return err + } + // keep open until end (exiting will count as a lost connection) + <-ctx.Done() + return nil + } + mock := &mockDaemon{watch: mockHandler} + require.NoError(t, mock.Start()) + defer mock.Stop() + + // set client to mock; before running + w.agentClient = mock.Client() + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.ErrorIs(t, ctx.Err(), context.DeadlineExceeded) + case err := <-errCh: + assert.NoError(t, err, "error should not have been reported") + } +} + +func TestWatcher_AgentErrorFlipFlop(t *testing.T) { + // timeout ensures that if it doesn't work; it doesn't block forever + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + errCh := make(chan error) + logger, _ := logger.NewTesting("watcher") + w := NewAgentWatcher(errCh, logger, 300*time.Millisecond) + + // reports only an error state, triggers failed + mockHandler := func(srv cproto.ElasticAgentControl_StateWatchServer) error { + err := srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_FAILED, + Message: "force failure", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_FAILED, + Message: "force failure", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_FAILED, + Message: "force failure", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_FAILED, + Message: "force failure", + }) + if err != nil { + return err + } + err = srv.Send(&cproto.StateResponse{ + Info: &cproto.StateAgentInfo{}, + State: cproto.State_HEALTHY, + Message: "healthy", + }) + if err != nil { + return err + } + // keep open until end (exiting will count as a lost connection) + <-ctx.Done() + return nil + } + mock := &mockDaemon{watch: mockHandler} + require.NoError(t, mock.Start()) + defer mock.Stop() + + // set client to mock; before running + w.agentClient = mock.Client() + go w.Run(ctx) + + select { + case <-ctx.Done(): + require.NoError(t, ctx.Err()) + case err := <-errCh: + assert.ErrorIs(t, err, ErrAgentFlipFlopFailed) + } +} + +type mockStateWatch func(srv cproto.ElasticAgentControl_StateWatchServer) error + +type mockDaemon struct { + cproto.UnimplementedElasticAgentControlServer + + port int + server *grpc.Server + + watch mockStateWatch +} + +func (s *mockDaemon) Start(opt ...grpc.ServerOption) error { + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", s.port)) + if err != nil { + return err + } + s.port = lis.Addr().(*net.TCPAddr).Port + srv := 
grpc.NewServer(opt...) + s.server = srv + cproto.RegisterElasticAgentControlServer(s.server, s) + go func() { + _ = srv.Serve(lis) + }() + return nil +} + +func (s *mockDaemon) Stop() { + if s.server != nil { + s.server.Stop() + s.server = nil + } +} + +func (s *mockDaemon) Client() client.Client { + return client.New(client.WithAddress(fmt.Sprintf("http://localhost:%d", s.port))) +} + +func (s *mockDaemon) StateWatch(_ *cproto.Empty, srv cproto.ElasticAgentControl_StateWatchServer) error { + return s.watch(srv) +} diff --git a/internal/pkg/agent/cmd/watch.go b/internal/pkg/agent/cmd/watch.go index 3cf84035df9..45648752eda 100644 --- a/internal/pkg/agent/cmd/watch.go +++ b/internal/pkg/agent/cmd/watch.go @@ -105,9 +105,8 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { } errorCheckInterval := cfg.Settings.Upgrade.Watcher.ErrorCheck.Interval - crashCheckInterval := cfg.Settings.Upgrade.Watcher.CrashCheck.Interval ctx := context.Background() - if err := watch(ctx, tilGrace, errorCheckInterval, crashCheckInterval, log); err != nil { + if err := watch(ctx, tilGrace, errorCheckInterval, log); err != nil { log.Error("Error detected proceeding to rollback: %v", err) err = upgrade.Rollback(ctx, log, marker.PrevHash, marker.Hash) if err != nil { @@ -134,9 +133,8 @@ func isWindows() bool { return runtime.GOOS == "windows" } -func watch(ctx context.Context, tilGrace time.Duration, errorCheckInterval, crashCheckInterval time.Duration, log *logger.Logger) error { +func watch(ctx context.Context, tilGrace time.Duration, errorCheckInterval time.Duration, log *logger.Logger) error { errChan := make(chan error) - crashChan := make(chan error) ctx, cancel := context.WithCancel(ctx) @@ -144,21 +142,10 @@ func watch(ctx context.Context, tilGrace time.Duration, errorCheckInterval, cras defer func() { cancel() close(errChan) - close(crashChan) }() - errorChecker, err := upgrade.NewErrorChecker(errChan, log, errorCheckInterval) - if err != nil { - return err - } - - crashChecker, err := upgrade.NewCrashChecker(ctx, crashChan, log, crashCheckInterval) - if err != nil { - return err - } - - go errorChecker.Run(ctx) - go crashChecker.Run(ctx) + agentWatcher := upgrade.NewAgentWatcher(errChan, log, errorCheckInterval) + go agentWatcher.Run(ctx) signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP) @@ -182,10 +169,6 @@ WATCHLOOP: case err := <-errChan: log.Errorf("Agent Error detected: %s", err.Error()) return err - // Agent keeps crashing unexpectedly - case err := <-crashChan: - log.Errorf("Agent crash detected: %s", err.Error()) - return err } } diff --git a/internal/pkg/agent/configuration/upgrade.go b/internal/pkg/agent/configuration/upgrade.go index d8528ec38f0..02fa52a695b 100644 --- a/internal/pkg/agent/configuration/upgrade.go +++ b/internal/pkg/agent/configuration/upgrade.go @@ -12,9 +12,6 @@ const ( // interval between checks for new (upgraded) Agent returning an error status. defaultStatusCheckInterval = 30 * time.Second - - // interval between checks for new (upgraded) Agent crashing. - defaultCrashCheckInterval = 10 * time.Second ) // UpgradeConfig is the configuration related to Agent upgrades. 
@@ -25,7 +22,6 @@ type UpgradeConfig struct { type UpgradeWatcherConfig struct { GracePeriod time.Duration `yaml:"grace_period" config:"grace_period" json:"grace_period"` ErrorCheck UpgradeWatcherCheckConfig `yaml:"error_check" config:"error_check" json:"error_check"` - CrashCheck UpgradeWatcherCheckConfig `yaml:"crash_check" config:"crash_check" json:"crash_check"` } type UpgradeWatcherCheckConfig struct { Interval time.Duration `yaml:"interval" config:"interval" json:"interval"` @@ -38,9 +34,6 @@ func DefaultUpgradeConfig() *UpgradeConfig { ErrorCheck: UpgradeWatcherCheckConfig{ Interval: defaultStatusCheckInterval, }, - CrashCheck: UpgradeWatcherCheckConfig{ - Interval: defaultCrashCheckInterval, - }, }, } } diff --git a/internal/pkg/agent/install/install.go b/internal/pkg/agent/install/install.go index 34e1405c5cd..08617db2fcc 100644 --- a/internal/pkg/agent/install/install.go +++ b/internal/pkg/agent/install/install.go @@ -11,6 +11,8 @@ import ( "runtime" "strings" + "github.com/kardianos/service" + "github.com/jaypipes/ghw" "github.com/otiai10/copy" @@ -216,6 +218,15 @@ func RestartService(topPath string) error { return nil } +// StatusService returns the status of the service. +func StatusService(topPath string) (service.Status, error) { + svc, err := newService(topPath) + if err != nil { + return service.StatusUnknown, err + } + return svc.Status() +} + // FixPermissions fixes the permissions on the installed system. func FixPermissions(topPath string) error { return fixPermissions(topPath) diff --git a/pkg/control/v1/proto/control_v1.pb.go b/pkg/control/v1/proto/control_v1.pb.go index a99fd51b1ab..59b8b4fb9f0 100644 --- a/pkg/control/v1/proto/control_v1.pb.go +++ b/pkg/control/v1/proto/control_v1.pb.go @@ -4,8 +4,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v4.23.4 // source: control_v1.proto // proto namespace/package name is shared with elastic-agent-client diff --git a/pkg/control/v1/proto/control_v1_grpc.pb.go b/pkg/control/v1/proto/control_v1_grpc.pb.go index 43e62f56985..bd4536b5578 100644 --- a/pkg/control/v1/proto/control_v1_grpc.pb.go +++ b/pkg/control/v1/proto/control_v1_grpc.pb.go @@ -4,10 +4,15 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.9 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.23.4 // source: control_v1.proto +// proto namespace/package name is shared with elastic-agent-client +// we need to be careful with modifications to avoid name collisions +// proto is here to maintain backward compatibility and cannot be changed. +// elastic-agent-client namespace is likely change after 8.6 + package proto import ( @@ -23,6 +28,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ElasticAgentControl_Version_FullMethodName = "/proto.ElasticAgentControl/Version" + ElasticAgentControl_Status_FullMethodName = "/proto.ElasticAgentControl/Status" + ElasticAgentControl_Restart_FullMethodName = "/proto.ElasticAgentControl/Restart" + ElasticAgentControl_Upgrade_FullMethodName = "/proto.ElasticAgentControl/Upgrade" +) + // ElasticAgentControlClient is the client API for ElasticAgentControl service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -47,7 +59,7 @@ func NewElasticAgentControlClient(cc grpc.ClientConnInterface) ElasticAgentContr func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) { out := new(VersionResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Version", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_Version_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -56,7 +68,7 @@ func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts func (c *elasticAgentControlClient) Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Status", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_Status_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -65,7 +77,7 @@ func (c *elasticAgentControlClient) Status(ctx context.Context, in *Empty, opts func (c *elasticAgentControlClient) Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) { out := new(RestartResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Restart", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_Restart_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -74,7 +86,7 @@ func (c *elasticAgentControlClient) Restart(ctx context.Context, in *Empty, opts func (c *elasticAgentControlClient) Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) { out := new(UpgradeResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Upgrade", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_Upgrade_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -135,7 +147,7 @@ func _ElasticAgentControl_Version_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.ElasticAgentControl/Version", + FullMethod: ElasticAgentControl_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).Version(ctx, req.(*Empty)) @@ -153,7 +165,7 @@ func _ElasticAgentControl_Status_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.ElasticAgentControl/Status", + FullMethod: ElasticAgentControl_Status_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).Status(ctx, req.(*Empty)) @@ -171,7 +183,7 @@ func _ElasticAgentControl_Restart_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.ElasticAgentControl/Restart", + FullMethod: ElasticAgentControl_Restart_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).Restart(ctx, req.(*Empty)) @@ -189,7 +201,7 @@ func _ElasticAgentControl_Upgrade_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.ElasticAgentControl/Upgrade", + FullMethod: ElasticAgentControl_Upgrade_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).Upgrade(ctx, req.(*UpgradeRequest)) diff --git a/pkg/control/v2/client/client.go b/pkg/control/v2/client/client.go index c2f593440fc..c3421b7a6b0 100644 --- a/pkg/control/v2/client/client.go +++ b/pkg/control/v2/client/client.go @@ -13,10 +13,11 @@ import ( "sync" "time" - "github.com/elastic/elastic-agent/pkg/control" - "github.com/elastic/elastic-agent/pkg/control/v2/cproto" + "google.golang.org/grpc" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/pkg/control" + "github.com/elastic/elastic-agent/pkg/control/v2/cproto" ) // UnitType is the type of the unit @@ -105,6 +106,7 @@ type AgentStateInfo struct { Commit string `json:"commit" yaml:"commit"` BuildTime string `json:"build_time" yaml:"build_time"` Snapshot bool `json:"snapshot" yaml:"snapshot"` + PID int32 `json:"pid" yaml:"pid"` } // AgentState is the current state of the Elastic Agent. @@ -156,10 +158,11 @@ type DiagnosticComponentResult struct { } // Client communicates to Elastic Agent through the control protocol. -// go:generate mockery --name Client +// +//go:generate mockery --name Client type Client interface { // Connect connects to the running Elastic Agent. - Connect(ctx context.Context) error + Connect(ctx context.Context, opts ...grpc.DialOption) error // Disconnect disconnects from the running Elastic Agent. Disconnect() // Version returns the current version of the running agent. @@ -231,9 +234,9 @@ func New(opts ...Option) Client { } // Connect connects to the running Elastic Agent. -func (c *client) Connect(ctx context.Context) error { +func (c *client) Connect(ctx context.Context, opts ...grpc.DialOption) error { c.ctx, c.cancel = context.WithCancel(ctx) - conn, err := dialContext(ctx, c.address, c.maxMsgSize) + conn, err := dialContext(ctx, c.address, c.maxMsgSize, opts...) 
if err != nil { return err } @@ -470,6 +473,7 @@ func toState(res *cproto.StateResponse) (*AgentState, error) { Commit: res.Info.Commit, BuildTime: res.Info.BuildTime, Snapshot: res.Info.Snapshot, + PID: res.Info.Pid, }, State: res.State, Message: res.Message, diff --git a/pkg/control/v2/client/dial.go b/pkg/control/v2/client/dial.go index dde7967d360..694c2355d79 100644 --- a/pkg/control/v2/client/dial.go +++ b/pkg/control/v2/client/dial.go @@ -15,17 +15,19 @@ import ( "google.golang.org/grpc/credentials/insecure" ) -func dialContext(ctx context.Context, address string, maxMsgSize int) (*grpc.ClientConn, error) { - return grpc.DialContext( - ctx, - strings.TrimPrefix(address, "unix://"), +func dialContext(ctx context.Context, address string, maxMsgSize int, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)), ) + return grpc.DialContext(ctx, address, opts...) } func dialer(ctx context.Context, addr string) (net.Conn, error) { var d net.Dialer - return d.DialContext(ctx, "unix", addr) + if strings.HasPrefix(addr, "http://") { + return d.DialContext(ctx, "tcp", strings.TrimPrefix(addr, "http://")) + } + return d.DialContext(ctx, "unix", strings.TrimPrefix(addr, "unix://")) } diff --git a/pkg/control/v2/client/dial_windows.go b/pkg/control/v2/client/dial_windows.go index 5c890865d57..1bd2336d2bb 100644 --- a/pkg/control/v2/client/dial_windows.go +++ b/pkg/control/v2/client/dial_windows.go @@ -9,6 +9,7 @@ package client import ( "context" "net" + "strings" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -16,16 +17,19 @@ import ( "github.com/elastic/elastic-agent-libs/api/npipe" ) -func dialContext(ctx context.Context, address string, maxMsgSize int) (*grpc.ClientConn, error) { - return grpc.DialContext( - ctx, - address, +func dialContext(ctx context.Context, address string, maxMsgSize int, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)), ) + return grpc.DialContext(ctx, address, opts...) } func dialer(ctx context.Context, addr string) (net.Conn, error) { + if strings.HasPrefix(addr, "http://") { + var d net.Dialer + return d.DialContext(ctx, "tcp", strings.TrimPrefix(addr, "http://")) + } return npipe.DialContext(addr)(ctx, "", "") } diff --git a/pkg/control/v2/client/mocks/client.go b/pkg/control/v2/client/mocks/client.go index 20d4a6a9b81..1b6668798a3 100644 --- a/pkg/control/v2/client/mocks/client.go +++ b/pkg/control/v2/client/mocks/client.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.36.0. DO NOT EDIT. 
package mocks @@ -13,6 +13,8 @@ import ( cproto "github.com/elastic/elastic-agent/pkg/control/v2/cproto" + grpc "google.golang.org/grpc" + mock "github.com/stretchr/testify/mock" ) @@ -72,13 +74,20 @@ func (_c *Client_Configure_Call) RunAndReturn(run func(context.Context, string) return _c } -// Connect provides a mock function with given fields: ctx -func (_m *Client) Connect(ctx context.Context) error { - ret := _m.Called(ctx) +// Connect provides a mock function with given fields: ctx, opts +func (_m *Client) Connect(ctx context.Context, opts ...grpc.DialOption) error { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, ...grpc.DialOption) error); ok { + r0 = rf(ctx, opts...) } else { r0 = ret.Error(0) } @@ -93,13 +102,21 @@ type Client_Connect_Call struct { // Connect is a helper method to define mock.On call // - ctx context.Context -func (_e *Client_Expecter) Connect(ctx interface{}) *Client_Connect_Call { - return &Client_Connect_Call{Call: _e.mock.On("Connect", ctx)} +// - opts ...grpc.DialOption +func (_e *Client_Expecter) Connect(ctx interface{}, opts ...interface{}) *Client_Connect_Call { + return &Client_Connect_Call{Call: _e.mock.On("Connect", + append([]interface{}{ctx}, opts...)...)} } -func (_c *Client_Connect_Call) Run(run func(ctx context.Context)) *Client_Connect_Call { +func (_c *Client_Connect_Call) Run(run func(ctx context.Context, opts ...grpc.DialOption)) *Client_Connect_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + variadicArgs := make([]grpc.DialOption, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(grpc.DialOption) + } + } + run(args[0].(context.Context), variadicArgs...) }) return _c } @@ -109,7 +126,7 @@ func (_c *Client_Connect_Call) Return(_a0 error) *Client_Connect_Call { return _c } -func (_c *Client_Connect_Call) RunAndReturn(run func(context.Context) error) *Client_Connect_Call { +func (_c *Client_Connect_Call) RunAndReturn(run func(context.Context, ...grpc.DialOption) error) *Client_Connect_Call { _c.Call.Return(run) return _c } @@ -613,13 +630,12 @@ func (_c *Client_Version_Call) RunAndReturn(run func(context.Context) (client.Ve return _c } -type mockConstructorTestingTNewClient interface { +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClient(t mockConstructorTestingTNewClient) *Client { +}) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/pkg/control/v2/cproto/control_v2.pb.go b/pkg/control/v2/cproto/control_v2.pb.go index d1a771ae627..143ce906ba5 100644 --- a/pkg/control/v2/cproto/control_v2.pb.go +++ b/pkg/control/v2/cproto/control_v2.pb.go @@ -4,8 +4,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v4.23.4 // source: control_v2.proto package cproto @@ -899,6 +899,8 @@ type StateAgentInfo struct { BuildTime string `protobuf:"bytes,4,opt,name=buildTime,proto3" json:"buildTime,omitempty"` // Current running version is a snapshot. Snapshot bool `protobuf:"varint,5,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + // Current running PID. + Pid int32 `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"` } func (x *StateAgentInfo) Reset() { @@ -968,6 +970,13 @@ func (x *StateAgentInfo) GetSnapshot() bool { return false } +func (x *StateAgentInfo) GetPid() int32 { + if x != nil { + return x.Pid + } + return 0 +} + // StateResponse is the current state of Elastic Agent. // Next unused id: 7 type StateResponse struct { @@ -1801,7 +1810,7 @@ var file_control_v2_proto_rawDesc = []byte{ 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x8c, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x9e, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, @@ -1810,174 +1819,175 @@ var file_control_v2_proto_rawDesc = []byte{ 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x85, 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, - 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 
0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xdf, - 0x01, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, - 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x66, - 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, - 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, - 0x22, 0x6c, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x12, 0x61, 0x64, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0xb5, - 0x01, 0x0a, 0x1b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, - 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x85, 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x66, 0x6c, 0x65, 
0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x66, 0x6c, 0x65, 0x65, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x6c, 0x65, 0x65, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0xdf, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, + 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x22, 0x6c, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x12, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x22, 0xb5, 0x01, 0x0a, 0x1b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x52, 0x0a, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 
0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x3f, 0x0a, 0x1a, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x52, 0x0a, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x23, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x3f, 0x0a, 0x1a, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, - 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x51, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x44, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, - 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, - 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x22, - 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x75, 0x6e, 0x69, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0xd1, - 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, - 0x74, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, - 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, - 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, - 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, - 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x1b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, - 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x51, 0x0a, 0x17, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x82, 0x01, 0x0a, + 0x15, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, + 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, + 0x64, 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, + 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x75, + 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, + 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, + 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, + 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, + 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, + 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x75, - 0x6e, 0x69, 0x74, 0x73, 0x22, 0x2a, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, - 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, - 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, - 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, - 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, - 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, - 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, - 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, - 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, - 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 
0x0b, 0x0a, 0x07, 0x53, - 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, - 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, - 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, - 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, - 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, - 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, - 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, - 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x2a, 0x26, 0x0a, 0x1b, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x07, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x10, 0x00, 0x32, 0xdf, - 0x04, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x31, - 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, - 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, - 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x53, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, - 
0x6e, 0x69, 0x74, 0x73, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x62, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, + 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x1b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x09, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x18, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x42, 0x29, 0x5a, 0x24, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, - 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x76, - 0x32, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0x2a, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, + 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, + 0x4e, 
0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, + 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, + 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, + 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, + 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, + 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, + 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, + 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, + 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, + 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, + 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, + 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, + 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, + 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x2a, 0x26, 0x0a, 0x1b, 0x41, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x07, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x10, 0x00, + 0x32, 0xdf, 0x04, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, + 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3a, 
0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x52, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x62, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x09, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x18, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x42, 0x29, 0x5a, 0x24, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, + 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/control/v2/cproto/control_v2_grpc.pb.go b/pkg/control/v2/cproto/control_v2_grpc.pb.go index 32f675c34a1..286fc0bb749 100644 --- a/pkg/control/v2/cproto/control_v2_grpc.pb.go +++ b/pkg/control/v2/cproto/control_v2_grpc.pb.go @@ -4,8 +4,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.9 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.23.4 // source: control_v2.proto package cproto @@ -23,6 +23,18 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + ElasticAgentControl_Version_FullMethodName = "/cproto.ElasticAgentControl/Version" + ElasticAgentControl_State_FullMethodName = "/cproto.ElasticAgentControl/State" + ElasticAgentControl_StateWatch_FullMethodName = "/cproto.ElasticAgentControl/StateWatch" + ElasticAgentControl_Restart_FullMethodName = "/cproto.ElasticAgentControl/Restart" + ElasticAgentControl_Upgrade_FullMethodName = "/cproto.ElasticAgentControl/Upgrade" + ElasticAgentControl_DiagnosticAgent_FullMethodName = "/cproto.ElasticAgentControl/DiagnosticAgent" + ElasticAgentControl_DiagnosticUnits_FullMethodName = "/cproto.ElasticAgentControl/DiagnosticUnits" + ElasticAgentControl_DiagnosticComponents_FullMethodName = "/cproto.ElasticAgentControl/DiagnosticComponents" + ElasticAgentControl_Configure_FullMethodName = "/cproto.ElasticAgentControl/Configure" +) + // ElasticAgentControlClient is the client API for ElasticAgentControl service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -65,7 +77,7 @@ func NewElasticAgentControlClient(cc grpc.ClientConnInterface) ElasticAgentContr func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) { out := new(VersionResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Version", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_Version_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -74,7 +86,7 @@ func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts func (c *elasticAgentControlClient) State(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StateResponse, error) { out := new(StateResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/State", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_State_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -82,7 +94,7 @@ func (c *elasticAgentControlClient) State(ctx context.Context, in *Empty, opts . } func (c *elasticAgentControlClient) StateWatch(ctx context.Context, in *Empty, opts ...grpc.CallOption) (ElasticAgentControl_StateWatchClient, error) { - stream, err := c.cc.NewStream(ctx, &ElasticAgentControl_ServiceDesc.Streams[0], "/cproto.ElasticAgentControl/StateWatch", opts...) + stream, err := c.cc.NewStream(ctx, &ElasticAgentControl_ServiceDesc.Streams[0], ElasticAgentControl_StateWatch_FullMethodName, opts...) if err != nil { return nil, err } @@ -115,7 +127,7 @@ func (x *elasticAgentControlStateWatchClient) Recv() (*StateResponse, error) { func (c *elasticAgentControlClient) Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) { out := new(RestartResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Restart", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_Restart_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -124,7 +136,7 @@ func (c *elasticAgentControlClient) Restart(ctx context.Context, in *Empty, opts func (c *elasticAgentControlClient) Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) { out := new(UpgradeResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Upgrade", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_Upgrade_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -133,7 +145,7 @@ func (c *elasticAgentControlClient) Upgrade(ctx context.Context, in *UpgradeRequ func (c *elasticAgentControlClient) DiagnosticAgent(ctx context.Context, in *DiagnosticAgentRequest, opts ...grpc.CallOption) (*DiagnosticAgentResponse, error) { out := new(DiagnosticAgentResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/DiagnosticAgent", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_DiagnosticAgent_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -141,7 +153,7 @@ func (c *elasticAgentControlClient) DiagnosticAgent(ctx context.Context, in *Dia } func (c *elasticAgentControlClient) DiagnosticUnits(ctx context.Context, in *DiagnosticUnitsRequest, opts ...grpc.CallOption) (ElasticAgentControl_DiagnosticUnitsClient, error) { - stream, err := c.cc.NewStream(ctx, &ElasticAgentControl_ServiceDesc.Streams[1], "/cproto.ElasticAgentControl/DiagnosticUnits", opts...) + stream, err := c.cc.NewStream(ctx, &ElasticAgentControl_ServiceDesc.Streams[1], ElasticAgentControl_DiagnosticUnits_FullMethodName, opts...) if err != nil { return nil, err } @@ -173,7 +185,7 @@ func (x *elasticAgentControlDiagnosticUnitsClient) Recv() (*DiagnosticUnitRespon } func (c *elasticAgentControlClient) DiagnosticComponents(ctx context.Context, in *DiagnosticComponentsRequest, opts ...grpc.CallOption) (ElasticAgentControl_DiagnosticComponentsClient, error) { - stream, err := c.cc.NewStream(ctx, &ElasticAgentControl_ServiceDesc.Streams[2], "/cproto.ElasticAgentControl/DiagnosticComponents", opts...) + stream, err := c.cc.NewStream(ctx, &ElasticAgentControl_ServiceDesc.Streams[2], ElasticAgentControl_DiagnosticComponents_FullMethodName, opts...) if err != nil { return nil, err } @@ -206,7 +218,7 @@ func (x *elasticAgentControlDiagnosticComponentsClient) Recv() (*DiagnosticCompo func (c *elasticAgentControlClient) Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Configure", in, out, opts...) + err := c.cc.Invoke(ctx, ElasticAgentControl_Configure_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -300,7 +312,7 @@ func _ElasticAgentControl_Version_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/Version", + FullMethod: ElasticAgentControl_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).Version(ctx, req.(*Empty)) @@ -318,7 +330,7 @@ func _ElasticAgentControl_State_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/State", + FullMethod: ElasticAgentControl_State_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).State(ctx, req.(*Empty)) @@ -357,7 +369,7 @@ func _ElasticAgentControl_Restart_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/Restart", + FullMethod: ElasticAgentControl_Restart_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).Restart(ctx, req.(*Empty)) @@ -375,7 +387,7 @@ func _ElasticAgentControl_Upgrade_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/Upgrade", + FullMethod: ElasticAgentControl_Upgrade_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).Upgrade(ctx, req.(*UpgradeRequest)) @@ -393,7 +405,7 @@ func _ElasticAgentControl_DiagnosticAgent_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/DiagnosticAgent", + FullMethod: ElasticAgentControl_DiagnosticAgent_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).DiagnosticAgent(ctx, req.(*DiagnosticAgentRequest)) @@ -453,7 +465,7 @@ func _ElasticAgentControl_Configure_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/Configure", + FullMethod: ElasticAgentControl_Configure_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ElasticAgentControlServer).Configure(ctx, req.(*ConfigureRequest)) diff --git a/pkg/control/v2/server/server.go b/pkg/control/v2/server/server.go index 773468d33b1..a130e658f19 100644 --- a/pkg/control/v2/server/server.go +++ b/pkg/control/v2/server/server.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "net" + "os" "time" "github.com/elastic/elastic-agent/pkg/control" @@ -368,6 +369,7 @@ func stateToProto(state *coordinator.State, agentInfo *info.AgentInfo) (*cproto. 
Commit: release.Commit(), BuildTime: release.BuildTime().Format(control.TimeFormat()), Snapshot: release.Snapshot(), + Pid: int32(os.Getpid()), }, State: state.State, Message: state.Message, diff --git a/testing/integration/upgrade_rollback_test.go b/testing/integration/upgrade_rollback_test.go index 3226ae92a51..02a209e18eb 100644 --- a/testing/integration/upgrade_rollback_test.go +++ b/testing/integration/upgrade_rollback_test.go @@ -15,6 +15,8 @@ import ( "testing" "time" + "github.com/kardianos/service" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -26,6 +28,12 @@ import ( "github.com/elastic/elastic-agent/testing/upgradetest" ) +const reallyFastWatcherCfg = ` +agent.upgrade.watcher: + grace_period: 1m + error_check.interval: 5s +` + // TestStandaloneUpgradeRollback tests the scenario where upgrading to a new version // of Agent fails due to the new Agent binary reporting an unhealthy status. It checks // that the Agent is rolled back to the previous version. @@ -38,28 +46,28 @@ func TestStandaloneUpgradeRollback(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Start at the build version as we want to test the retry - // logic that is in the build. - startFixture, err := define.NewFixture(t, define.Version()) - require.NoError(t, err) - startVersionInfo, err := startFixture.ExecVersion(ctx) - require.NoError(t, err, "failed to get start agent build version info") - - // Upgrade to an old build. - upgradeToVersion, err := upgradetest.PreviousMinor(ctx, define.Version()) + // Upgrade from an old build because the new watcher from the new build will + // be ran. Otherwise the test will run the old watcher from the old build. + upgradeFromVersion, err := upgradetest.PreviousMinor(ctx, define.Version()) require.NoError(t, err) - endFixture, err := atesting.NewFixture( + startFixture, err := atesting.NewFixture( t, - upgradeToVersion, + upgradeFromVersion, atesting.WithFetcher(atesting.ArtifactFetcher()), ) require.NoError(t, err) + startVersionInfo, err := startFixture.ExecVersion(ctx) + require.NoError(t, err, "failed to get start agent build version info") - t.Logf("Testing Elastic Agent upgrade from %s to %s...", define.Version(), upgradeToVersion) + // Upgrade to the build under test. + endFixture, err := define.NewFixture(t, define.Version()) + require.NoError(t, err) + + t.Logf("Testing Elastic Agent upgrade from %s to %s...", upgradeFromVersion, define.Version()) // We need to use the core version in the condition below because -SNAPSHOT is // stripped from the ${agent.version.version} evaluation below. - parsedUpgradeToVersion, err := version.ParseVersion(upgradeToVersion) + endVersion, err := version.ParseVersion(define.Version()) require.NoError(t, err) // Configure Agent with fast watcher configuration and also an invalid @@ -77,7 +85,7 @@ inputs: - condition: '${agent.version.version} == "%s"' type: invalid id: invalid-input -`, parsedUpgradeToVersion.CoreVersion()) +`, endVersion.CoreVersion()) return startFixture.Configure(ctx, []byte(invalidInputPolicy)) } @@ -135,24 +143,24 @@ func TestStandaloneUpgradeRollbackOnRestarts(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Start at the build version as we want to test the retry - // logic that is in the build. 
- startFixture, err := define.NewFixture(t, define.Version()) - require.NoError(t, err) - startVersionInfo, err := startFixture.ExecVersion(ctx) - require.NoError(t, err, "failed to get start agent build version info") - - // Upgrade to an old build. - upgradeToVersion, err := upgradetest.PreviousMinor(ctx, define.Version()) + // Upgrade from an old build because the new watcher from the new build will + // be ran. Otherwise the test will run the old watcher from the old build. + upgradeFromVersion, err := upgradetest.PreviousMinor(ctx, define.Version()) require.NoError(t, err) - endFixture, err := atesting.NewFixture( + startFixture, err := atesting.NewFixture( t, - upgradeToVersion, + upgradeFromVersion, atesting.WithFetcher(atesting.ArtifactFetcher()), ) require.NoError(t, err) + startVersionInfo, err := startFixture.ExecVersion(ctx) + require.NoError(t, err, "failed to get start agent build version info") - t.Logf("Testing Elastic Agent upgrade from %s to %s...", define.Version(), upgradeToVersion) + // Upgrade to the build under test. + endFixture, err := define.NewFixture(t, define.Version()) + require.NoError(t, err) + + t.Logf("Testing Elastic Agent upgrade from %s to %s...", upgradeFromVersion, define.Version()) // Use the post-upgrade hook to bypass the remainder of the PerformUpgrade // because we want to do our own checks for the rollback. @@ -163,7 +171,8 @@ func TestStandaloneUpgradeRollbackOnRestarts(t *testing.T) { err = upgradetest.PerformUpgrade( ctx, startFixture, endFixture, t, - upgradetest.WithPostUpgradeHook(postUpgradeHook)) + upgradetest.WithPostUpgradeHook(postUpgradeHook), + upgradetest.WithCustomWatcherConfig(reallyFastWatcherCfg)) if !errors.Is(err, ErrPostExit) { require.NoError(t, err) } @@ -174,16 +183,42 @@ func TestStandaloneUpgradeRollbackOnRestarts(t *testing.T) { time.Sleep(10 * time.Second) topPath := paths.Top() - t.Logf("Restarting Agent via service to simulate crashing") - err = install.RestartService(topPath) + t.Logf("Stopping agent via service to simulate crashing") + err = install.StopService(topPath) if err != nil && runtime.GOOS == define.Windows && strings.Contains(err.Error(), "The service has not been started.") { // Due to the quick restarts every 10 seconds its possible that this is faster than Windows // can handle. Decrementing restartIdx means that the loop will occur again. 
t.Logf("Got an allowed error on Windows: %s", err) - restartIdx-- - continue + err = nil } require.NoError(t, err) + + // ensure that it's stopped before starting it again + var status service.Status + var statusErr error + require.Eventuallyf(t, func() bool { + status, statusErr = install.StatusService(topPath) + if statusErr != nil { + return false + } + return status != service.StatusRunning + }, 2*time.Minute, 1*time.Second, "service never fully stopped (status: %v): %s", status, statusErr) + t.Logf("Stopped agent via service to simulate crashing") + + // start it again + t.Logf("Starting agent via service to simulate crashing") + err = install.StartService(topPath) + require.NoError(t, err) + + // ensure that it's started before next loop + require.Eventuallyf(t, func() bool { + status, statusErr = install.StatusService(topPath) + if statusErr != nil { + return false + } + return status == service.StatusRunning + }, 2*time.Minute, 1*time.Second, "service never fully started (status: %v): %s", status, statusErr) + t.Logf("Started agent via service to simulate crashing") } // wait for the agent to be healthy and back at the start version diff --git a/testing/upgradetest/upgrader.go b/testing/upgradetest/upgrader.go index 64dcb2b70ac..93aeed622a5 100644 --- a/testing/upgradetest/upgrader.go +++ b/testing/upgradetest/upgrader.go @@ -27,9 +27,10 @@ type CustomPGP struct { type upgradeOpts struct { sourceURI *string - skipVerify bool - skipDefaultPgp bool - customPgp *CustomPGP + skipVerify bool + skipDefaultPgp bool + customPgp *CustomPGP + customWatcherCfg string preInstallHook func() error postInstallHook func() error @@ -98,6 +99,13 @@ func WithPostUpgradeHook(hook func() error) upgradeOpt { } } +// WithCustomWatcherConfig sets a custom watcher configuration to use. +func WithCustomWatcherConfig(cfg string) upgradeOpt { + return func(opts *upgradeOpts) { + opts.customWatcherCfg = cfg + } +} + // PerformUpgrade performs the upgrading of the Elastic Agent. func PerformUpgrade( ctx context.Context, @@ -126,7 +134,11 @@ func PerformUpgrade( } // start fixture gets the agent configured to use a faster watcher - err = ConfigureFastWatcher(ctx, startFixture) + if upgradeOpts.customWatcherCfg != "" { + err = startFixture.Configure(ctx, []byte(upgradeOpts.customWatcherCfg)) + } else { + err = ConfigureFastWatcher(ctx, startFixture) + } if err != nil { return fmt.Errorf("failed configuring the start agent with faster watcher configuration: %w", err) } diff --git a/testing/upgradetest/watcher.go b/testing/upgradetest/watcher.go index d0505f5992d..fd34dc82a7f 100644 --- a/testing/upgradetest/watcher.go +++ b/testing/upgradetest/watcher.go @@ -16,7 +16,7 @@ import ( // FastWatcherCfg is configuration that makes the watcher run faster. const FastWatcherCfg = ` -agent.upgradetest.watcher: +agent.upgrade.watcher: grace_period: 1m error_check.interval: 15s crash_check.interval: 15s From 91963200744780654f4448a3c3e0a4da99419ac2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 19 Oct 2023 10:31:59 +0200 Subject: [PATCH 07/15] [Hints Support] Add default host (#3575) * Add default host. * Add changelog. * Update const. 
* Add casting error * Add casting error --- .../1696935756-add-default-host.yaml | 32 +++++ .../composable/providers/kubernetes/hints.go | 118 ++++++++-------- .../providers/kubernetes/hints_test.go | 131 ++++++++++++++++++ .../composable/providers/kubernetes/pod.go | 4 - 4 files changed, 220 insertions(+), 65 deletions(-) create mode 100644 changelog/fragments/1696935756-add-default-host.yaml diff --git a/changelog/fragments/1696935756-add-default-host.yaml b/changelog/fragments/1696935756-add-default-host.yaml new file mode 100644 index 00000000000..a497170f3e7 --- /dev/null +++ b/changelog/fragments/1696935756-add-default-host.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Hints Autodiscovery for Elastic Agent - Add default host for each container in a pod + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: elastic-agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/3575 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: https://github.com/elastic/elastic-agent/issues/1453 diff --git a/internal/pkg/composable/providers/kubernetes/hints.go b/internal/pkg/composable/providers/kubernetes/hints.go index 0a439afd5da..8557007c87e 100644 --- a/internal/pkg/composable/providers/kubernetes/hints.go +++ b/internal/pkg/composable/providers/kubernetes/hints.go @@ -16,9 +16,10 @@ import ( ) const ( + hints = "hints" + integration = "package" datastreams = "data_streams" - host = "host" period = "period" timeout = "timeout" @@ -26,6 +27,7 @@ const ( username = "username" password = "password" stream = "stream" // this is the container stream: stdout/stderr + processors = "processors" ) type hintsBuilder struct { @@ -250,83 +252,77 @@ func GenerateHintsMapping(hints mapstr.M, kubeMeta mapstr.M, logger *logp.Logger return hintsMapping } -// Generates the hints and processor mappings from provided pod annotation map +// GetHintsMapping Generates the hints and processor mappings from provided pod annotation map func GetHintsMapping(k8sMapping map[string]interface{}, logger *logp.Logger, prefix string, cID string) hintsData { hintData := hintsData{ composableMapping: mapstr.M{}, processors: []mapstr.M{}, } - var hints mapstr.M - var containerProcessors []mapstr.M - - if ann, ok := k8sMapping["annotations"]; ok { - annotations, _ := ann.(mapstr.M) - - if containerEntries, err := annotations.GetValue(prefix + ".hints"); err == nil { - entries, ok := containerEntries.(mapstr.M) - if ok && len(entries) > 0 { - for key := range entries { - parts := strings.Split(key, "/") - - if len(parts) > 1 { - if con, ok := k8sMapping["container"]; ok { - containers, ok := con.(mapstr.M) - if ok { - if cname, err := containers.GetValue("name"); err == nil { - if parts[0] == cname { - // If there are hints like co.elastic.hints./ then add the values after the / to the corresponding container - // Eg Annotation "co.elastic.hints.nginx/stream: stderr" will create a hints entry for container nginx - hints, containerProcessors = GenerateHintsForContainer(annotations, parts[0], prefix) - } - } - } - } - } + cName := "" + cHost := "" + + ann, ok := k8sMapping["annotations"] + if !ok { + return hintData + } + annotations, ok := ann.(mapstr.M) + if !ok { + return hintData + } + + // Get the name of the container from the metadata. We need it to extract the hints that affect it directly. + // E.g. co.elastic.hints./host: "..." + if con, ok := k8sMapping["container"]; ok { + if containers, ok := con.(mapstr.M); ok { + if name, err := containers.GetValue("name"); err == nil { + if nameString, ok := name.(string); ok { + cName = nameString + } + } + if cPort, err := containers.GetValue("port"); err == nil { + // This is the default for the host value of a specific container. + if portString, ok := cPort.(string); ok { + cHost = "${kubernetes.pod.ip}:" + portString } } - } else { - // If there are top level hints like co.elastic.hints/ then just add the values after the / - // Eg. 
Annotation "co.elastic.hints/stream: stderr" will will create a hints entries for all containers in the pod - hints = utils.GenerateHints(annotations, "", prefix) } - logger.Debugf("Extracted hints are :%v", hints) + } - if len(hints) > 0 { - hintData = GenerateHintsResult(hints, k8sMapping, annotations, logger, prefix, cID) + hintsExtracted := utils.GenerateHints(annotations, cName, prefix) + if len(hintsExtracted) == 0 { + return hintData + } - // Only if there are processors defined in a specific container we append them to the processors of the top level - if len(containerProcessors) > 0 { - hintData.processors = append(hintData.processors, containerProcessors...) + // Check if host exists. Otherwise, add default entry for it. + if cHost != "" { + hintsValues, ok := hintsExtracted[hints] + if ok { + if hintsHostValues, ok := hintsValues.(mapstr.M); ok { + if _, ok := hintsHostValues[host]; !ok { + hintsHostValues[host] = cHost + } + } + } else { + hintsExtracted[hints] = mapstr.M{ + host: cHost, } - logger.Debugf("Generated Processors mapping :%v", hintData.processors) } } - return hintData -} - -// Generates hints and processors list for specific containers -func GenerateHintsForContainer(annotations mapstr.M, parts, prefix string) (mapstr.M, []mapstr.M) { - hints := utils.GenerateHints(annotations, parts, prefix) - // Processors for specific container - // We need to make an extra check if we have processors added only to the specific containers - containerProcessors := utils.GetConfigs(annotations, prefix, "hints."+parts+"/processors") + logger.Debugf("Extracted hints are :%v", hintsExtracted) - return hints, containerProcessors -} - -// Generates the final hintData (hints and processors) struct that will be emitted in pods. -func GenerateHintsResult(hints mapstr.M, k8sMapping map[string]interface{}, annotations mapstr.M, logger *logp.Logger, prefix, cID string) hintsData { - hintData := hintsData{ - composableMapping: mapstr.M{}, - processors: []mapstr.M{}, - } - - hintData.composableMapping = GenerateHintsMapping(hints, k8sMapping, logger, cID) + hintData.composableMapping = GenerateHintsMapping(hintsExtracted, k8sMapping, logger, cID) logger.Debugf("Generated hints mappings :%v", hintData.composableMapping) - // Eg co.elastic.hints/processors.decode_json_fields.fields: "message" will add a processor in all containers of pod - hintData.processors = utils.GetConfigs(annotations, prefix, processorhints) + hintData.processors = utils.GetConfigs(annotations, prefix, hints+"/"+processors) + // We need to check the processors for the specific container, if they exist. + if cName != "" { + containerProcessors := utils.GetConfigs(annotations, prefix, hints+"."+cName+"/"+processors) + if len(containerProcessors) > 0 { + hintData.processors = append(hintData.processors, containerProcessors...) 
+ } + } + logger.Debugf("Generated Processors mapping :%v", hintData.processors) return hintData } diff --git a/internal/pkg/composable/providers/kubernetes/hints_test.go b/internal/pkg/composable/providers/kubernetes/hints_test.go index 17e36b0d7c7..0cc709acaf7 100644 --- a/internal/pkg/composable/providers/kubernetes/hints_test.go +++ b/internal/pkg/composable/providers/kubernetes/hints_test.go @@ -591,3 +591,134 @@ func TestGenerateHintsMappingWithProcessorsForContainer(t *testing.T) { assert.Contains(t, expectedprocesors, hintData.processors[1]) } } + +func TestDefaultHost(t *testing.T) { + logger := getLogger() + cID := "abcd" + + mapping := map[string]interface{}{ + "namespace": "testns", + "pod": mapstr.M{ + "uid": string(types.UID(uid)), + "name": "testpod", + "ip": "127.0.0.5", + }, + "annotations": mapstr.M{ + "app": "production", + "co": mapstr.M{ + "elastic": mapstr.M{ + "hints/package": "redis", + "hints": mapstr.M{ + "redis-1/host": "${kubernetes.pod.ip}:6379", + "redis-1/stream": "stderr", + "redis-2/host": "${kubernetes.pod.ip}:6400", + "redis-4/stream": "stderr", + }, + }, + }, + }, + } + + addContainerMapping := func(mapping map[string]interface{}, container mapstr.M) map[string]interface{} { + clone := make(map[string]interface{}, len(mapping)) + for k, v := range mapping { + clone[k] = v + } + clone["container"] = container + return clone + } + + tests := []struct { + msg string + mapping map[string]interface{} + expected mapstr.M + }{ + { + msg: "Test container with two hints (redis-1), of which one is host.", + mapping: addContainerMapping(mapping, + mapstr.M{ + "name": "redis-1", + "port": "6379", + "id": cID, + }, + ), + expected: mapstr.M{ + "container_id": cID, + "redis": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "enabled": true, + "host": "127.0.0.5:6379", + "stream": "stderr", + }, + }, + }, + { + msg: "Test container with only one hint for host (redis-2).", + mapping: addContainerMapping(mapping, + mapstr.M{ + "name": "redis-2", + "port": "6400", + "id": cID, + }, + ), + expected: mapstr.M{ + "container_id": cID, + "redis": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "enabled": true, + "host": "127.0.0.5:6400", + }, + }, + }, + { + msg: "Test container without hints and check for the default host (redis-3).", + mapping: addContainerMapping(mapping, + mapstr.M{ + "name": "redis-3", + "port": "7000", + "id": cID, + }, + ), + expected: mapstr.M{ + "container_id": cID, + "redis": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "enabled": true, + "host": "127.0.0.5:7000", + }, + }, + }, + { + msg: "Test container with one hint for stream and without port defined (redis-4).", + mapping: addContainerMapping(mapping, + mapstr.M{ + "name": "redis-4", + "id": cID, + }, + ), + expected: mapstr.M{ + "container_id": cID, + "redis": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "enabled": true, + "stream": "stderr", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.msg, func(t *testing.T) { + hintData := GetHintsMapping(test.mapping, logger, "co.elastic", cID) + assert.Equal(t, test.expected, hintData.composableMapping) + }) + } +} diff --git a/internal/pkg/composable/providers/kubernetes/pod.go b/internal/pkg/composable/providers/kubernetes/pod.go index 1769e793183..ffe817218e7 100644 --- a/internal/pkg/composable/providers/kubernetes/pod.go +++ b/internal/pkg/composable/providers/kubernetes/pod.go @@ -24,10 +24,6 @@ import ( 
"github.com/elastic/elastic-agent/internal/pkg/composable" ) -const ( - processorhints = "hints/processors" -) - type pod struct { watcher kubernetes.Watcher nodeWatcher kubernetes.Watcher From 96d46f9073d95a6fe79274f006efe43860f9c78f Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 19 Oct 2023 17:17:28 +0200 Subject: [PATCH 08/15] Enable log errors check test and filter for acceptable errors (#3616) --- .../application/dispatcher/dispatcher.go | 2 +- .../gateway/fleet/fleet_gateway.go | 2 +- .../agent/application/monitoring/server.go | 2 +- .../pkg/agent/storage/store/action_store.go | 2 +- .../pkg/agent/storage/store/state_store.go | 2 +- internal/pkg/capabilities/upgrade.go | 2 +- .../pkg/fleetapi/acker/lazy/lazy_acker.go | 2 +- pkg/testing/tools/estools/elasticsearch.go | 7 +++++ testing/integration/monitoring_logs_test.go | 26 ++++++++++++++++--- 9 files changed, 36 insertions(+), 11 deletions(-) diff --git a/internal/pkg/agent/application/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go index a4ec47a96fe..92ac050f9ab 100644 --- a/internal/pkg/agent/application/dispatcher/dispatcher.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher.go @@ -248,7 +248,7 @@ func (ad *ActionDispatcher) scheduleRetry(ctx context.Context, action fleetapi.R attempt := action.RetryAttempt() d, err := ad.rt.GetWait(attempt) if err != nil { - ad.log.Errorf("No more reties for action id %s: %v", action.ID(), err) + ad.log.Errorf("No more retries for action id %s: %v", action.ID(), err) action.SetRetryAttempt(-1) if err := acker.Ack(ctx, action); err != nil { ad.log.Errorf("Unable to ack action failure (id %s) to fleet-server: %v", action.ID(), err) diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 31c81955a10..000ec534bf2 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -217,7 +217,7 @@ func (f *FleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee if f.checkinFailCounter > 0 { // Log at same level as error logs above so subsequent successes are visible when log level is set to 'error'. - f.log.Errorf("Checkin request to fleet-server succeeded after %d failures", f.checkinFailCounter) + f.log.Warnf("Checkin request to fleet-server succeeded after %d failures", f.checkinFailCounter) } f.checkinFailCounter = 0 diff --git a/internal/pkg/agent/application/monitoring/server.go b/internal/pkg/agent/application/monitoring/server.go index 390a472d5ed..47561d29e49 100644 --- a/internal/pkg/agent/application/monitoring/server.go +++ b/internal/pkg/agent/application/monitoring/server.go @@ -35,7 +35,7 @@ func NewServer( ) (*api.Server, error) { if err := createAgentMonitoringDrop(endpointConfig.Host); err != nil { // log but ignore - log.Errorf("failed to create monitoring drop: %v", err) + log.Warnf("failed to create monitoring drop: %v", err) } cfg, err := config.NewConfigFrom(endpointConfig) diff --git a/internal/pkg/agent/storage/store/action_store.go b/internal/pkg/agent/storage/store/action_store.go index ea0b2eb3c8b..4fc9df8b485 100644 --- a/internal/pkg/agent/storage/store/action_store.go +++ b/internal/pkg/agent/storage/store/action_store.go @@ -33,7 +33,7 @@ func newActionStore(log *logger.Logger, store storeLoad) (*actionStore, error) { // and return an empty store. 
reader, err := store.Load() if err != nil { - log.Errorf("failed to load action store, returning empty contents: %v", err.Error()) + log.Warnf("failed to load action store, returning empty contents: %v", err.Error()) return &actionStore{log: log, store: store}, nil } defer reader.Close() diff --git a/internal/pkg/agent/storage/store/state_store.go b/internal/pkg/agent/storage/store/state_store.go index 6f64f1184bf..3e794c3547b 100644 --- a/internal/pkg/agent/storage/store/state_store.go +++ b/internal/pkg/agent/storage/store/state_store.go @@ -95,7 +95,7 @@ func NewStateStore(log *logger.Logger, store storeLoad) (*StateStore, error) { // and return an empty store. reader, err := store.Load() if err != nil { - log.Errorf("failed to load state store, returning empty contents: %v", err.Error()) + log.Warnf("failed to load state store, returning empty contents: %v", err.Error()) return &StateStore{log: log, store: store}, nil } defer reader.Close() diff --git a/internal/pkg/capabilities/upgrade.go b/internal/pkg/capabilities/upgrade.go index 07866ec111e..0f7b19babf8 100644 --- a/internal/pkg/capabilities/upgrade.go +++ b/internal/pkg/capabilities/upgrade.go @@ -65,7 +65,7 @@ func allowUpgrade( for _, cap := range upgradeCaps { result, err := cap.condition.Eval(varStore, true) if err != nil { - log.Errorf("failed evaluating eql formula %q, skipping: %v", cap.conditionStr, err) + log.Warnf("failed evaluating eql formula %q, skipping: %v", cap.conditionStr, err) continue } if result { diff --git a/internal/pkg/fleetapi/acker/lazy/lazy_acker.go b/internal/pkg/fleetapi/acker/lazy/lazy_acker.go index 298b2b5bf7f..65f7bdc1cac 100644 --- a/internal/pkg/fleetapi/acker/lazy/lazy_acker.go +++ b/internal/pkg/fleetapi/acker/lazy/lazy_acker.go @@ -87,7 +87,7 @@ func (f *Acker) Commit(ctx context.Context) (err error) { // If request failed enqueue all actions with retrier if it is set if err != nil { if f.retrier != nil { - f.log.Errorf("lazy acker: failed ack batch, enqueue for retry: %s", actions) + f.log.Warnf("lazy acker: failed ack batch, enqueue for retry: %s", actions) f.retrier.Enqueue(actions) return nil } diff --git a/pkg/testing/tools/estools/elasticsearch.go b/pkg/testing/tools/estools/elasticsearch.go index 8cd6e126597..ca6dad2dba4 100644 --- a/pkg/testing/tools/estools/elasticsearch.go +++ b/pkg/testing/tools/estools/elasticsearch.go @@ -201,6 +201,13 @@ func CheckForErrorsInLogsWithContext(ctx context.Context, client elastictranspor "log.level": "error", }, }, + { + "term": map[string]interface{}{ + "data_stream.namespace": map[string]interface{}{ + "value": namespace, + }, + }, + }, }, "must_not": excludeStatements, }, diff --git a/testing/integration/monitoring_logs_test.go b/testing/integration/monitoring_logs_test.go index 97836c7ff3f..c52b2150d47 100644 --- a/testing/integration/monitoring_logs_test.go +++ b/testing/integration/monitoring_logs_test.go @@ -38,7 +38,6 @@ func TestMonitoringLogsShipped(t *testing.T) { ctx := context.Background() t.Logf("got namespace: %s", info.Namespace) - t.Skip("Test is flaky; see https://github.com/elastic/elastic-agent/issues/3081") agentFixture, err := define.NewFixture(t, define.Version()) require.NoError(t, err) @@ -90,7 +89,7 @@ func TestMonitoringLogsShipped(t *testing.T) { require.NotZero(t, len(docs.Hits.Hits)) t.Logf("metricbeat: Got %d documents", len(docs.Hits.Hits)) - // Stage 4: make sure all components are health + // Stage 4: make sure all components are healthy t.Log("Making sure all components are healthy") status, err := 
agentFixture.ExecStatus(ctx) require.NoError(t, err, @@ -101,7 +100,26 @@ func TestMonitoringLogsShipped(t *testing.T) { c.Name, client.Healthy, client.State(c.State)) } - // Stage 5: Make sure we have message confirming central management is running + // Stage 5: Make sure there are no errors in logs + t.Log("Making sure there are no error logs") + docs = findESDocs(t, func() (estools.Documents, error) { + return estools.CheckForErrorsInLogs(info.ESClient, info.Namespace, []string{ + // acceptable error messages (include reason) + "Error dialing dial tcp 127.0.0.1:9200: connect: connection refused", // beat is running default config before its config gets updated + "Global configuration artifact is not available", // Endpoint: failed to load user artifact due to connectivity issues + "Failed to download artifact", + "Failed to initialize artifact", + "Failed to apply initial policy from on disk configuration", + "elastic-agent-client error: rpc error: code = Canceled desc = context canceled", // can happen on restart + }) + }) + t.Logf("errors: Got %d documents", len(docs.Hits.Hits)) + for _, doc := range docs.Hits.Hits { + t.Logf("%#v", doc.Source) + } + require.Empty(t, docs.Hits.Hits) + + // Stage 6: Make sure we have message confirming central management is running t.Log("Making sure we have message confirming central management is running") docs = findESDocs(t, func() (estools.Documents, error) { return estools.FindMatchingLogLines(info.ESClient, info.Namespace, @@ -109,7 +127,7 @@ func TestMonitoringLogsShipped(t *testing.T) { }) require.NotZero(t, len(docs.Hits.Hits)) - // Stage 6: verify logs from the monitoring components are not sent to the output + // Stage 7: verify logs from the monitoring components are not sent to the output t.Log("Check monitoring logs") hostname, err := os.Hostname() if err != nil { From bfb2c70559344ea10ef73f6a0c835122207a0d97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Oct 2023 13:08:29 -0400 Subject: [PATCH 09/15] Bump github.com/elastic/go-elasticsearch/v8 from 8.10.0 to 8.10.1 (#3599) * Bump github.com/elastic/go-elasticsearch/v8 from 8.10.0 to 8.10.1 Bumps [github.com/elastic/go-elasticsearch/v8](https://github.com/elastic/go-elasticsearch) from 8.10.0 to 8.10.1. - [Release notes](https://github.com/elastic/go-elasticsearch/releases) - [Changelog](https://github.com/elastic/go-elasticsearch/blob/main/CHANGELOG.md) - [Commits](https://github.com/elastic/go-elasticsearch/compare/v8.10.0...v8.10.1) --- updated-dependencies: - dependency-name: github.com/elastic/go-elasticsearch/v8 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update NOTICE.txt --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 2ac1bfb9926..3290e34d2e5 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1799,11 +1799,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transpo -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-elasticsearch/v8 -Version: v8.10.0 +Version: v8.10.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.10.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.10.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index e6e2a1cdd67..48c55e7e5a7 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/elastic/elastic-agent-libs v0.6.0 github.com/elastic/elastic-agent-system-metrics v0.7.0 github.com/elastic/elastic-transport-go/v8 v8.3.0 - github.com/elastic/go-elasticsearch/v8 v8.10.0 + github.com/elastic/go-elasticsearch/v8 v8.10.1 github.com/elastic/go-licenser v0.4.1 github.com/elastic/go-sysinfo v1.11.1 github.com/elastic/go-ucfg v0.8.6 diff --git a/go.sum b/go.sum index 41b12f0955f..d176fb612e9 100644 --- a/go.sum +++ b/go.sum @@ -787,13 +787,12 @@ github.com/elastic/elastic-agent-system-metrics v0.7.0 h1:qDLY30UDforSd/TfHfqUDi github.com/elastic/elastic-agent-system-metrics v0.7.0/go.mod h1:9C1UEfj0P687HAzZepHszN6zXA+2tN2Lx3Osvq1zby8= github.com/elastic/elastic-integration-corpus-generator-tool v0.5.0/go.mod h1:uf9N86y+UACGybdEhZLpwZ93XHWVhsYZAA4c2T2v6YM= github.com/elastic/elastic-package v0.77.0/go.mod h1:Xeqx0OOVnKBfFoSHsHmKI74RxgRGiDhU6yXEu8BkJJM= -github.com/elastic/elastic-transport-go/v8 v8.0.0-20230329154755-1a3c63de0db6/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= github.com/elastic/elastic-transport-go/v8 v8.3.0 h1:DJGxovyQLXGr62e9nDMPSxRyWION0Bh6d9eCFBriiHo= github.com/elastic/elastic-transport-go/v8 v8.3.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= github.com/elastic/go-elasticsearch/v7 v7.17.7/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= -github.com/elastic/go-elasticsearch/v8 v8.10.0 h1:ALg3DMxSrx07YmeMNcfPf7cFh1Ep2+Qa19EOXTbwr2k= -github.com/elastic/go-elasticsearch/v8 v8.10.0/go.mod h1:NGmpvohKiRHXI0Sw4fuUGn6hYOmAXlyCphKpzVBiqDE= +github.com/elastic/go-elasticsearch/v8 v8.10.1 h1:JJ3i2DimYTsJcUoEGbg6tNB0eehTNdid9c5kTR1TGuI= +github.com/elastic/go-elasticsearch/v8 v8.10.1/go.mod h1:GU1BJHO7WeamP7UhuElYwzzHtvf9SDmeVpSSy9+o6Qg= github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= github.com/elastic/go-licenser v0.4.1 h1:1xDURsc8pL5zYT9R29425J3vkHdt4RT5TNEMeRN48x4= From e43be2aca62dac5d09b89c2357e511140f86d04f Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Thu, 19 Oct 2023 19:58:10 +0200 Subject: [PATCH 10/15] Remove PGP signature verification skip for DEV builds (#3590) * remove PGP signature 
verification skip for DEV builds * create pgptest package to sign and give the public key to verify the signature * fix tests that relied on skipping the PGP verification * add PGP/GPG how-to on docs * add test for VerifySHA512HashWithCleanup --- dev-tools/cmd/buildpgp/build_pgp.go | 6 +- docs/pgp-sign-verify-artifact.md | 176 +++++++++++++++ .../application/coordinator/coordinator.go | 4 +- .../artifact/download/fs/downloader.go | 32 +-- ... elastic-agent-8.0.0-darwin-x86_64.tar.gz} | 0 ...astic-agent-8.0.0-darwin-x86_64.tar.gz.asc | 14 ++ ...c-agent-8.0.0-darwin-x86_64.tar.gz.sha512} | 2 +- .../download/fs/testdata/drop/public-key.pgp | 40 ++++ .../upgrade/artifact/download/fs/verifier.go | 101 +++------ .../artifact/download/fs/verifier_test.go | 147 +++++++----- .../artifact/download/http/common_test.go | 121 ++++++++++ .../artifact/download/http/downloader.go | 1 - .../artifact/download/http/downloader_test.go | 42 ++++ .../artifact/download/http/elastic_test.go | 211 ------------------ .../artifact/download/http/verifier.go | 100 +++------ .../artifact/download/http/verifier_test.go | 81 +++++++ .../artifact/download/localremote/verifier.go | 8 +- .../artifact/download/snapshot/verifier.go | 4 +- .../upgrade/artifact/download/verifier.go | 107 +++++++-- .../artifact/download/verifier_test.go | 85 +++++++ .../application/upgrade/step_download.go | 10 +- internal/pkg/agent/cmd/run.go | 4 - internal/pkg/release/pgp.go | 6 +- internal/pkg/release/version.go | 4 - testing/integration/upgrade_gpg_test.go | 8 +- testing/pgptest/pgp.go | 42 ++++ 26 files changed, 882 insertions(+), 474 deletions(-) create mode 100644 docs/pgp-sign-verify-artifact.md rename internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/{beat-8.0.0-darwin-x86_64.tar.gz => elastic-agent-8.0.0-darwin-x86_64.tar.gz} (100%) create mode 100644 internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc rename internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/{beat-8.0.0-darwin-x86_64.tar.gz.sha512 => elastic-agent-8.0.0-darwin-x86_64.tar.gz.sha512} (74%) create mode 100644 internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/public-key.pgp create mode 100644 internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go delete mode 100644 internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go create mode 100644 internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go create mode 100644 testing/pgptest/pgp.go diff --git a/dev-tools/cmd/buildpgp/build_pgp.go b/dev-tools/cmd/buildpgp/build_pgp.go index 8559ea04c32..659fc1acbd1 100644 --- a/dev-tools/cmd/buildpgp/build_pgp.go +++ b/dev-tools/cmd/buildpgp/build_pgp.go @@ -49,9 +49,9 @@ func init() { pgpBytes = packer.MustUnpack("{{ .Pack }}")["GPG-KEY-elasticsearch"] } -// PGP return pgpbytes and a flag describing whether or not no pgp is valid. -func PGP() (bool, []byte) { - return allowEmptyPgp == "true", pgpBytes +// PGP return pgpbytes. +func PGP() []byte { + return pgpBytes } `)) diff --git a/docs/pgp-sign-verify-artifact.md b/docs/pgp-sign-verify-artifact.md new file mode 100644 index 00000000000..9f8f1295a89 --- /dev/null +++ b/docs/pgp-sign-verify-artifact.md @@ -0,0 +1,176 @@ +# Signing Elastic Agent artifacts + +This doc covers generating a key, exporting the public key, signing a file and verifying it using GPG as well as pure Go. 
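The pure-Go example below covers signing only. For the verification side, a minimal sketch using the same `golang.org/x/crypto/openpgp` package could look like the following; it assumes the `public-key.pgp`, `elastic-agent-8.0.0-darwin-x86_64.tar.gz` and `.asc` file names produced by the GPG steps further down, and is a sketch rather than the agent's own verifier code.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// Load the exported, ASCII-armoured public key (see "Export the public key" below).
	pub, err := os.Open("public-key.pgp")
	NoError(err, "could not open public key")
	defer pub.Close()

	keyring, err := openpgp.ReadArmoredKeyRing(pub)
	NoError(err, "could not read keyring from public key")

	// The signed artifact and its detached, ASCII-armoured signature.
	signed, err := os.Open("elastic-agent-8.0.0-darwin-x86_64.tar.gz")
	NoError(err, "could not open signed file")
	defer signed.Close()

	sig, err := os.Open("elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc")
	NoError(err, "could not open detached signature")
	defer sig.Close()

	// Check the detached signature against the keyring.
	signer, err := openpgp.CheckArmoredDetachedSignature(keyring, signed, sig)
	NoError(err, "signature verification failed")

	for name := range signer.Identities {
		fmt.Printf("good signature from %q\n", name)
	}
}

func NoError(err error, msg string, args ...any) {
	if err != nil {
		panic(fmt.Sprintf(msg+": %v", append(args, err)))
	}
}
```

Both the signed file and the signature are consumed as `io.Reader` streams, so large artifacts do not need to fit in memory.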
+ +Full GPG docs: https://www.gnupg.org/documentation/manuals/gnupg/OpenPGP-Key-Management.html + + +## Go + +```go +package main + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/armor" +) + +func main() { + dir, err := os.MkdirTemp(os.TempDir(), "pgp-") + NoError(err, "could not create directory to save the files to") + + key := filepath.Join(dir, "key") + keyPub := filepath.Join(dir, "key.pub") + asc := filepath.Join(dir, "plaindata.asc") + + fmt.Printf("Writing files to %q\n", dir) + + data := []byte("some data") + plaindata := filepath.Join(dir, "plaindata") + err = os.WriteFile(plaindata, data, 0o600) + NoError(err, "could not write plain data file") + + fmt.Printf("wrote %q\n", plaindata) + + // Create files + fKeyPub, err := os.OpenFile( + keyPub, + os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + NoError(err, "could not create %q file", keyPub) + defer func() { + if err := fKeyPub.Close(); err != nil { + fmt.Printf("failed closing %q\n", fKeyPub.Name()) + } + fmt.Printf("wrote %q\n", fKeyPub.Name()) + }() + + fKey, err := os.OpenFile( + key, + os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + NoError(err, "could not create %q file", key) + defer func() { + if err := fKey.Close(); err != nil { + fmt.Printf("failed closing %q\n", fKey.Name()) + } + fmt.Printf("wrote %q\n", fKey.Name()) + }() + + fasc, err := os.OpenFile( + asc, + os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + NoError(err, "could not create %q file", asc) + defer func() { + if err := fasc.Close(); err != nil { + fmt.Printf("failed closing %q\n", fasc.Name()) + } + fmt.Printf("wrote %q\n", fasc.Name()) + }() + + // Generate PGP key + entity, err := openpgp.NewEntity("someKeyName", "", "", nil) + + // Create an ASCII armored encoder to serialize the private key + wPubKey, err := armor.Encode(fKeyPub, openpgp.PublicKeyType, nil) + NoError(err, "could not create PGP ASCII Armor encoder for public key") + defer func() { + err := wPubKey.Close() + if err != nil { + fmt.Println("failed closing private key writer") + } + }() + + // Writes the public key to the io.Writer passed to armor.Encode. + // Use entity.SerializePrivate if you need the private key. + err = entity.Serialize(wPubKey) + NoError(err, "could not serialize the public key") + + // Create an ASCII armored encoder to serialize the private key + wPrivKey, err := armor.Encode(fKey, openpgp.PrivateKeyType, nil) + NoError(err, "could not create PGP ASCII Armor encoder for private key") + defer func() { + err := wPrivKey.Close() + if err != nil { + fmt.Println("failed closing private key writer") + } + }() + + // Writes the private key to the io.Writer passed to armor.Encode. + // Use entity.SerializePrivate if you need the private key. 
+	err = entity.SerializePrivate(wPrivKey, nil)
+	NoError(err, "could not serialize the private key")
+
+	// Sign data and write the detached signature to fasc
+	err = openpgp.ArmoredDetachSign(fasc, entity, bytes.NewReader(data), nil)
+	NoError(err, "failed signing data")
+}
+
+func NoError(err error, msg string, args ...any) {
+	if err != nil {
+		panic(fmt.Sprintf(msg+": %v", append(args, err)))
+	}
+}
+```
+
+## GPG
+### Generate a key
+
+```shell
+gpg --no-default-keyring --keyring ./some-file-to-be-the-key-ring --quick-generate-key atest rsa2048 default none
+```
+Where:
+ - `--no-default-keyring`: do not use your keyring
+ - `--keyring ./some-file-to-be-the-key-ring`: keyring to use; as the file does not exist, it'll create it
+ - `--quick-generate-key`: quick generate the key
+ - `atest`: user-id, a.k.a. the key identifier
+ - `rsa2048`: algorithm to use
+ - `default`: "usage" for the key. Just use default
+ - `none`: key expiration
+
+
+### Export the public key
+```shell
+gpg --no-default-keyring --keyring ./some-file-to-be-the-key-ring --armor --output public-key.pgp --export atest
+```
+Where:
+ - `--no-default-keyring`: do not use your keyring
+ - `--keyring ./some-file-to-be-the-key-ring`: the keyring to use, created in the previous step
+ - `--armor`: create ASCII armoured output. Otherwise, it's a binary format
+ - `--output public-key.pgp`: the output file
+ - `--export`: export the public key
+ - `atest`: the key identifier
+
+### Sign the file
+```shell
+gpg --no-default-keyring --keyring ./some-file-to-be-the-key-ring -a -o elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc --detach-sign elastic-agent-8.0.0-darwin-x86_64.tar.gz
+```
+
+Where:
+ - `-a -o`: --armor, --output
+ - `elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc`: the output file
+ - `--detach-sign`: generate a separate file for the signature
+ - `elastic-agent-8.0.0-darwin-x86_64.tar.gz`: the file to sign
+
+
+
+### Verify the file
+
+#### Import the public key
+```shell
+gpg --no-default-keyring --keyring ./new-keyring --import public-key.pgp
+```
+Where:
+ - `--import`: import a key
+ - `public-key.pgp`: the key to import
+
+#### Verify the signature using the imported key
+```shell
+gpg --no-default-keyring --keyring ./new-keyring --verify elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc
+```
+Where:
+ - `--verify`: verify a signature
+ - `elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc`: the detached signature file. It'll assume the file to be verified is `elastic-agent-8.0.0-darwin-x86_64.tar.gz`
diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go
index e6a2dbc182f..a17d3bf6199 100644
--- a/internal/pkg/agent/application/coordinator/coordinator.go
+++ b/internal/pkg/agent/application/coordinator/coordinator.go
@@ -108,7 +108,7 @@ type RuntimeManager interface {
 	// it performs diagnostics for all current units.
 	PerformDiagnostics(context.Context, ...runtime.ComponentUnitDiagnosticRequest) []runtime.ComponentUnitDiagnostic
 
-	//PerformComponentDiagnostics executes the diagnostic action for the provided components. If no components are provided,
+	// PerformComponentDiagnostics executes the diagnostic action for the provided components. If no components are provided,
 	// then it performs the diagnostics for all current units.
PerformComponentDiagnostics(ctx context.Context, additionalMetrics []cproto.AdditionalDiagnosticRequest, req ...component.Component) ([]runtime.ComponentDiagnostic, error) } @@ -415,7 +415,7 @@ func (c *Coordinator) ReExec(callback reexec.ShutdownCallbackFn, argOverrides .. // Upgrade runs the upgrade process. // Called from external goroutines. func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) error { - // early check outside of upgrader before overridding the state + // early check outside of upgrader before overriding the state if !c.upgradeMgr.Upgradeable() { return ErrNotUpgradable } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go index 42cc058c16b..6de72f0143e 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go @@ -52,32 +52,34 @@ func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version }() // download from source to dest - path, err := e.download(e.config.OS(), a, version) + path, err := e.download(e.config.OS(), a, version, "") downloadedFiles = append(downloadedFiles, path) if err != nil { return "", err } - hashPath, err := e.downloadHash(e.config.OS(), a, version) + hashPath, err := e.download(e.config.OS(), a, version, ".sha512") downloadedFiles = append(downloadedFiles, hashPath) return path, err } -func (e *Downloader) download(operatingSystem string, a artifact.Artifact, version string) (string, error) { - filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) - if err != nil { - return "", errors.New(err, "generating package name failed") - } - - fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) +// DownloadAsc downloads the package .asc file from configured source. +// It returns absolute path to the downloaded file and a no-nil error if any occurs. 
+func (e *Downloader) DownloadAsc(_ context.Context, a artifact.Artifact, version string) (string, error) { + path, err := e.download(e.config.OS(), a, version, ".asc") if err != nil { - return "", errors.New(err, "generating package path failed") + os.Remove(path) + return "", err } - return e.downloadFile(filename, fullPath) + return path, nil } -func (e *Downloader) downloadHash(operatingSystem string, a artifact.Artifact, version string) (string, error) { +func (e *Downloader) download( + operatingSystem string, + a artifact.Artifact, + version, + extension string) (string, error) { filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") @@ -88,8 +90,10 @@ func (e *Downloader) downloadHash(operatingSystem string, a artifact.Artifact, v return "", errors.New(err, "generating package path failed") } - filename = filename + ".sha512" - fullPath = fullPath + ".sha512" + if extension != "" { + filename += extension + fullPath += extension + } return e.downloadFile(filename, fullPath) } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz similarity index 100% rename from internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz rename to internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc new file mode 100644 index 00000000000..dc0dc3745c9 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz.asc @@ -0,0 +1,14 @@ +-----BEGIN PGP SIGNATURE----- + +iQGzBAABCgAdFiEE81a455Doc5DWexOcF4e6ez4rqzAFAmUn7TsACgkQF4e6ez4r +qzDcIgwArpuXDex9aisWFWkXjCfjhJdrTTXr3wv8W68NeFsAaazLlvsWPxdol1db +FeKFL+P/P/PhlTvdkZw9xMyXoVRWQXJ2p2jVjV0Wq2SCtbbjdrGjQ4OrchgE9FW7 +onWxqV8RjzPyaMwpDWWtHKgxhQeLP5yXhWm6RXHvBLZ5mqbTCuIq2Q4sijEd6IFD +9JoAA276tqyKGOsPZ1QzaPUFF69B9QLcWasEuNFf5ytMVFfTcMl6/HYDPO7ErhJx +E1hnKGIc5rrMghL0LzaVLGYZUtnQwru02ZA0omXzEv1uYgqmZl75g9qHk2Cu2V5W +0qbg9OtUKOkJ1sODvsVv8O40rVazdZTgL2ifNLi2wFwR3syMdHCih2aKMcPDPzt3 +Q4q0zvsxuR9PGsv5+8zze74iC3oZSvF8h36XGjJuyjEFORUpcWNGDmhsC6l7ql5W +rEbIPZ19j3r1M4yHG/ptBmrwRnQz9RKFnwTO9ME/5eBVumPLUD5kAcYXjvAFYQI5 +qEc7okL5 +=+nvi +-----END PGP SIGNATURE----- diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz.sha512 similarity index 74% rename from internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 rename to internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz.sha512 index 5d0fc9e405d..599ae848893 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 +++ 
b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/elastic-agent-8.0.0-darwin-x86_64.tar.gz.sha512 @@ -1 +1 @@ -9af9aa016f3349aa248034629e4336ca2f4d31317bfb8c9a23a9d924c18969cf43ad93727e784da010a272690b2b5ce4c4ded3a5d2039e4408e93e1e18d113db beat-8.0.0-darwin-x86_64.tar.gz +9af9aa016f3349aa248034629e4336ca2f4d31317bfb8c9a23a9d924c18969cf43ad93727e784da010a272690b2b5ce4c4ded3a5d2039e4408e93e1e18d113db elastic-agent-8.0.0-darwin-x86_64.tar.gz diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/public-key.pgp b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/public-key.pgp new file mode 100644 index 00000000000..7d452cb033c --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/public-key.pgp @@ -0,0 +1,40 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQGNBGUn7JEBDADH0iBdohpZIQY7QyBz9Hl68b7fq0zoFcB4HTDDMQD1ouDQfPwg +Frpr/ViNNHsye1QfrUWXN8FQfKztqHtUHeM8ggdSvhYYGaDtVSuVakoNNz3Z3+kD +YhwH0byZrE2MiCKExtgQYWBIDd1TeCMSOgYcQPAXPqQBwX0G1xRAY3s+eazCjaSU +aNJtlNuAx36jEBa+X73sTh+Y/OtCSN9s75SVYu5xJ+kXkpcHNvsMJmDCZ0zsKrxT +TMvXSU9qcczj8+wAee/1E77eR01ttrf67IjVReuVZ0OhxucVxJHOp7x9jfeGsjjn +6uhFT0KV+VOaaRlI9wZ4AOMmAX5nroNYP/GC+SKiOvKV79+r3jyxbChqd5nWdSBN +mO9okB72nUpGmL1NosW926MMTauR9/nP1uWB66d/pHYRop7sAbAZ7u8COoRS1wd+ +V6dtb3QUwR9LsfKd1xQfrTFVKZ4i703MN1qkq/6TqLhpwlt0+K4WN7LtkkeFivyx +N0RLiVDzZP289ssAEQEAAbQFYXRlc3SJAc4EEwEKADgWIQTzVrjnkOhzkNZ7E5wX +h7p7PiurMAUCZSfskQIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAXh7p7 +PiurMFkbDAC0QLqwq4dZGjOqcNjj02DOM1eQcC8XUSy0w8X6gX/69wFHGM34zl4+ +IO7H6ujvkBxMHmeEU3nNsLH+WsN6Hc8JBRQZSqjySgL2am+K6XYMcP7h7VGnFR0r +5IKbGn9zCR7xkVrkvW0T48U0fJ00X3v+GWcxcBQIu58sMmKrmzliPCDhmQ94yum8 +n8Yc1tB3DazAQEDGxtfP8/yc93sWKZ4qKPBMZUsjSSzC8a7zei/J9vJccRy/JJEl +/mNIQx7FxObrCSSa3wXc4AEbWdq4HNZkahLvnOs4EhNR9ihWg7TtMVyBesV/rdgj +5cgHU3erir1nSOHmrHqLydeWH4vHW4R6BYuJd6NXhsISMHO8Oerlceqmt7aex3wJ +09ULyareJ3QMc+HWcjxxYbSLU6j5ZgCqcPz17V88W7SkXnzbPaoVAxMCf+M3a0Ib +r+Yw6CrvWRj2+bmW8Ars6fND90nX4ZS82VnMc27kFqNYdkAE9kdlZ+L8OU70nWmT +Clh2FhjhHKe5AY0EZSfskQEMANT+4NWxDtyExZEIvwUdegcetF3hbdHlXMmMnuPU +vJwPhXhXJtzyX5VKRp3WCUO2TOGMKaHaNPi4XCS4QMzBEEft8C7X896QPGqanGdV +oZ9Oc/mXNZfuOk62hP6Ifn38VIyxAcpQ11ypKJ5wFSwSvkPIdaXm1125oGIFQg+W +51GSNz8PBuP5GavLs3L1Wp2VupJ9pOrolxGRP+t41u6rNewaktSO6eLY0o0j/FMY +Anujnj68sS92e7TnQcaAEUsplYLrZlZI1Ly0W2QakvOUIkDq5DSsNYKypTM1rZ7s +VYENPjHdhATsHoW1LxirBKHuoi8aANSjsofdggnxtu+sp8mk/+oZpyR78yA/+hIA +/t/wEVgVXETTB0Y8o6n8+/U/uBHEjYGa8JJEcMbNJesQAusBXYt90N8URKHRWEcR +L9IH3V4rmssDqgE7voHYvNKFru/socsI3WPmDnPKFWGRd7rqzlkBoqbrPiD/tRIC +cwDqz5hm3vKqOkHqvsGqzNVp4wARAQABiQG2BBgBCgAgFiEE81a455Doc5DWexOc +F4e6ez4rqzAFAmUn7JECGwwACgkQF4e6ez4rqzA23gv/UZTQw13urB8Hf6s5FJyz +z5dCWT1RMW1ig7MuCe/MzRCk29zDc16y5fOo0aLzYMWsQXBrBTAXj6hx2/MYHXg0 +mUXzxrnUqM5H/b1hbx52NdwD1eR1prQIX39ifPzw+FTirD98qx04479En/561PQW +lbWXtm1/JoaSpGIYP2gWNgb3HfHShEGPxFH39vxmP6XVz99BL+3zaHehcCUP8fbC +Kabo/qbtNC/nZEBUVVMxEj2O9eEq9otk8K8fBzoCOQ4K0Idn+BnQ0O67x4jemunD +JX6BGBo0WYxJNarK2sJw5+CVRK472va8U6Y+6yGyv5qu68eOZZXvkrCbDpysSIf7 +YjwhmaZuerd4oBvRKJHbbHoqgde8sviSjm6cdU+ZSHILvwEaBLwW3pTgBJAupQcV +4Ws7fo7/6R2YWws8c4sseGqLC+XxCXk+SvrvyA02ZBY+0L6IFD6Cb8BT0uMMrLIP +YcZ1xK3gfrp4PCg2OFj46WER5ufHP1r0zvufY7chA9tP +=Jwiw +-----END PGP PUBLIC KEY BLOCK----- diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go index e42a35c76a4..8c7861e1c75 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go 
+++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go @@ -6,7 +6,6 @@ package fs import ( "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -26,11 +25,10 @@ const ( // The signature is validated against Elastic's public GPG key that is // embedded into Elastic Agent. type Verifier struct { - config *artifact.Config - client http.Client - pgpBytes []byte - allowEmptyPgp bool - log *logger.Logger + config *artifact.Config + client http.Client + defaultKey []byte + log *logger.Logger } func (v *Verifier) Name() string { @@ -39,9 +37,9 @@ func (v *Verifier) Name() string { // NewVerifier creates a verifier checking downloaded package on preconfigured // location against a key stored on elastic.co website. -func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Verifier, error) { - if len(pgp) == 0 && !allowEmptyPgp { - return nil, errors.New("expecting PGP but retrieved none", errors.TypeSecurity) +func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte) (*Verifier, error) { + if len(pgp) == 0 { + return nil, errors.New("expecting PGP key but received none", errors.TypeSecurity) } client, err := config.HTTPTransportSettings.Client( @@ -55,11 +53,10 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool } v := &Verifier{ - config: config, - client: *client, - allowEmptyPgp: allowEmptyPgp, - pgpBytes: pgp, - log: log, + config: config, + client: *client, + defaultKey: pgp, + log: log, } return v, nil @@ -70,24 +67,22 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool func (v *Verifier) Verify(a artifact.Artifact, version string, skipDefaultPgp bool, pgpBytes ...string) error { filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch()) if err != nil { - return errors.New(err, "retrieving package name") + return fmt.Errorf("could not get artifact name: %w", err) } - fullPath := filepath.Join(v.config.TargetDirectory, filename) + artifactPath := filepath.Join(v.config.TargetDirectory, filename) - if err = download.VerifySHA512Hash(fullPath); err != nil { - var checksumMismatchErr *download.ChecksumMismatchError - if errors.As(err, &checksumMismatchErr) { - os.Remove(fullPath) - os.Remove(fullPath + ".sha512") - } - return err + if err = download.VerifySHA512HashWithCleanup(v.log, artifactPath); err != nil { + return fmt.Errorf("failed to verify SHA512 hash: %w", err) } - if err = v.verifyAsc(fullPath, skipDefaultPgp, pgpBytes...); err != nil { + if err = v.verifyAsc(artifactPath, skipDefaultPgp, pgpBytes...); err != nil { var invalidSignatureErr *download.InvalidSignatureError if errors.As(err, &invalidSignatureErr) { - os.Remove(fullPath + ".asc") + if err := os.Remove(artifactPath + ".asc"); err != nil { + v.log.Warnf("failed clean up after signature verification: failed to remove %q: %v", + artifactPath+".asc", err) + } } return err } @@ -113,63 +108,25 @@ func (v *Verifier) Reload(c *artifact.Config) error { return nil } -func (v *Verifier) verifyAsc(fullPath string, skipDefaultPgp bool, pgpSources ...string) error { +func (v *Verifier) verifyAsc(fullPath string, skipDefaultKey bool, pgpSources ...string) error { var pgpBytes [][]byte - if len(v.pgpBytes) > 0 && !skipDefaultPgp { - v.log.Infof("Default PGP being appended") - pgpBytes = append(pgpBytes, v.pgpBytes) - } - - for _, check := range pgpSources { - if len(check) == 0 { - continue - } - raw, err := download.PgpBytesFromSource(v.log, check, &v.client) - if err != 
nil { - return err - } - - if len(raw) == 0 { - continue - } - - pgpBytes = append(pgpBytes, raw) - } - - if len(pgpBytes) == 0 { - // no pgp available skip verification process - v.log.Infof("No checks defined") - return nil + pgpBytes, err := download.FetchPGPKeys( + v.log, v.client, v.defaultKey, skipDefaultKey, pgpSources) + if err != nil { + return fmt.Errorf("could not fetch pgp keys: %w", err) } - v.log.Infof("Using %d PGP keys", len(pgpBytes)) ascBytes, err := v.getPublicAsc(fullPath) - if err != nil && v.allowEmptyPgp { - // asc not available but we allow empty for dev use-case - return nil - } else if err != nil { - return err - } - - for i, check := range pgpBytes { - err = download.VerifyGPGSignature(fullPath, ascBytes, check) - if err == nil { - // verify successful - v.log.Infof("Verification with PGP[%d] successful", i) - return nil - } - v.log.Warnf("Verification with PGP[%d] failed: %v", i, err) + if err != nil { + return fmt.Errorf("could not get .asc file: %w", err) } - v.log.Warnf("Verification failed") - - // return last error - return err + return download.VerifyPGPSignatureWithKeys(v.log, fullPath, ascBytes, pgpBytes) } func (v *Verifier) getPublicAsc(fullPath string) ([]byte, error) { fullPath = fmt.Sprintf("%s%s", fullPath, ascSuffix) - b, err := ioutil.ReadFile(fullPath) + b, err := os.ReadFile(fullPath) if err != nil { return nil, errors.New(err, fmt.Sprintf("fetching asc file from '%s'", fullPath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath)) } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go index 5012e8244dd..4bd605142f3 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go @@ -5,10 +5,10 @@ package fs import ( + "bytes" "context" "crypto/sha512" "fmt" - "io/ioutil" "os" "path/filepath" "testing" @@ -18,11 +18,11 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/transport/httpcommon" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/testing/pgptest" ) const ( @@ -30,21 +30,30 @@ const ( ) var ( - beatSpec = artifact.Artifact{Name: "Filebeat", Cmd: "filebeat", Artifact: "beat/filebeat"} + beatSpec = artifact.Artifact{ + Name: "Elastic Agent", + Cmd: "elastic-agent", + Artifact: "beat/elastic-agent"} ) func TestFetchVerify(t *testing.T) { + // See docs/pgp-sign-verify-artifact.md for how to generate a key, export + // the public key, sign a file and verify it. 
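The fixture helpers used by this test deliberately corrupt the .sha512 sidecar so the first Verify call fails. For reference, a valid sidecar is simply the hex-encoded SHA-512 digest followed by the base file name, the same layout the helpers in this patch write. A minimal stand-alone sketch (directory and file name are made up):

package main

import (
	"crypto/sha512"
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "sidecar")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	content := []byte("sample content")
	name := "elastic-agent-8.0.0-darwin-x86_64.tar.gz"
	path := filepath.Join(dir, name)

	if err := os.WriteFile(path, content, 0o644); err != nil {
		panic(err)
	}

	// "<hex sha512> <file name>" is the sidecar layout the verifier parses.
	sum := sha512.Sum512(content)
	sidecar := fmt.Sprintf("%x %s", sum, name)
	if err := os.WriteFile(path+".sha512", []byte(sidecar), 0o644); err != nil {
		panic(err)
	}
	fmt.Println(sidecar)
}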
+ log, _ := logger.New("", false) timeout := 15 * time.Second dropPath := filepath.Join("testdata", "drop") installPath := filepath.Join("testdata", "install") targetPath := filepath.Join("testdata", "download") ctx := context.Background() - s := artifact.Artifact{Name: "Beat", Cmd: "beat", Artifact: "beats/filebeat"} + a := artifact.Artifact{ + Name: "elastic-agent", Cmd: "elastic-agent", Artifact: "beats/elastic-agent"} version := "8.0.0" - targetFilePath := filepath.Join(targetPath, "beat-8.0.0-darwin-x86_64.tar.gz") - hashTargetFilePath := filepath.Join(targetPath, "beat-8.0.0-darwin-x86_64.tar.gz.sha512") + filename := "elastic-agent-8.0.0-darwin-x86_64.tar.gz" + targetFilePath := filepath.Join(targetPath, filename) + hashTargetFilePath := filepath.Join(targetPath, filename+".sha512") + ascTargetFilePath := filepath.Join(targetPath, filename+".asc") // cleanup defer os.RemoveAll(targetPath) @@ -60,48 +69,52 @@ func TestFetchVerify(t *testing.T) { }, } - err := prepareFetchVerifyTests(dropPath, targetPath, targetFilePath, hashTargetFilePath) - assert.NoError(t, err) + err := prepareFetchVerifyTests(dropPath, targetPath, filename, targetFilePath, hashTargetFilePath) + require.NoError(t, err) - downloader := NewDownloader(config) - verifier, err := NewVerifier(log, config, true, nil) - assert.NoError(t, err) + pgp, err := os.ReadFile(filepath.Join(dropPath, "public-key.pgp")) + require.NoError(t, err, "could not read public PGP key") + verifier, err := NewVerifier(log, config, pgp) + require.NoError(t, err, "could not create the verifier") // first download verify should fail: // download skipped, as invalid package is prepared upfront // verify fails and cleans download - err = verifier.Verify(s, version, false) + err = verifier.Verify(a, version, false) var checksumErr *download.ChecksumMismatchError - assert.ErrorAs(t, err, &checksumErr) + require.ErrorAs(t, err, &checksumErr) _, err = os.Stat(targetFilePath) - assert.True(t, os.IsNotExist(err)) + require.True(t, os.IsNotExist(err)) _, err = os.Stat(hashTargetFilePath) - assert.True(t, os.IsNotExist(err)) + require.True(t, os.IsNotExist(err)) // second one should pass // download not skipped: package missing // verify passes because hash is not correct - _, err = downloader.Download(ctx, s, version) - assert.NoError(t, err) + _, err = NewDownloader(config).Download(ctx, a, version) + require.NoError(t, err) + asc, err := os.ReadFile(filepath.Join(dropPath, filename+".asc")) + require.NoErrorf(t, err, "could not open .asc for copy") + err = os.WriteFile(ascTargetFilePath, asc, 0o600) + require.NoErrorf(t, err, "could not save .asc (%q) to target path (%q)", + filepath.Join(dropPath, filename+".asc"), ascTargetFilePath) // file downloaded ok _, err = os.Stat(targetFilePath) - assert.NoError(t, err) - + require.NoError(t, err) _, err = os.Stat(hashTargetFilePath) - assert.NoError(t, err) + require.NoError(t, err) + _, err = os.Stat(ascTargetFilePath) + require.NoError(t, err) - err = verifier.Verify(s, version, false) - assert.NoError(t, err) - - // Enable GPG signature validation. - verifier.allowEmptyPgp = false + err = verifier.Verify(a, version, false) + require.NoError(t, err) // Bad GPG public key. { - verifier.pgpBytes = []byte("garbage") + verifier.defaultKey = []byte("garbage") // Don't delete anything. assertFileExists(t, targetFilePath) @@ -109,11 +122,11 @@ func TestFetchVerify(t *testing.T) { } // Setup proper GPG public key. - _, verifier.pgpBytes = release.PGP() + verifier.defaultKey = release.PGP() // Missing .asc file. 
{ - err = verifier.Verify(s, version, false) + err = verifier.Verify(a, version, false) require.Error(t, err) // Don't delete these files when GPG validation failure. @@ -123,10 +136,10 @@ func TestFetchVerify(t *testing.T) { // Invalid signature. { - err = ioutil.WriteFile(targetFilePath+".asc", []byte("bad sig"), 0o600) + err = os.WriteFile(targetFilePath+".asc", []byte("bad sig"), 0o600) require.NoError(t, err) - err = verifier.Verify(s, version, false) + err = verifier.Verify(a, version, false) var invalidSigErr *download.InvalidSignatureError assert.ErrorAs(t, err, &invalidSigErr) @@ -139,9 +152,14 @@ func TestFetchVerify(t *testing.T) { } } -func prepareFetchVerifyTests(dropPath, targetDir, targetFilePath, hashTargetFilePath string) error { - sourceFilePath := filepath.Join(dropPath, "beat-8.0.0-darwin-x86_64.tar.gz") - hashSourceFilePath := filepath.Join(dropPath, "beat-8.0.0-darwin-x86_64.tar.gz.sha512") +func prepareFetchVerifyTests( + dropPath, + targetDir, + filename, + targetFilePath, + hashTargetFilePath string) error { + sourceFilePath := filepath.Join(dropPath, filename) + hashSourceFilePath := filepath.Join(dropPath, filename+".sha512") // clean targets os.Remove(targetFilePath) @@ -163,13 +181,13 @@ func prepareFetchVerifyTests(dropPath, targetDir, targetFilePath, hashTargetFile } defer targretFile.Close() - hashContent, err := ioutil.ReadFile(hashSourceFilePath) + hashContent, err := os.ReadFile(hashSourceFilePath) if err != nil { return err } corruptedHash := append([]byte{1, 2, 3, 4, 5, 6}, hashContent[6:]...) - return ioutil.WriteFile(hashTargetFilePath, corruptedHash, 0666) + return os.WriteFile(hashTargetFilePath, corruptedHash, 0666) } func TestVerify(t *testing.T) { @@ -185,8 +203,7 @@ func TestVerify(t *testing.T) { for _, tc := range tt { t.Run(tc.Name, func(t *testing.T) { log, obs := logger.NewTesting("TestVerify") - targetDir, err := ioutil.TempDir(os.TempDir(), "") - require.NoError(t, err) + targetDir := t.TempDir() timeout := 30 * time.Second @@ -200,23 +217,18 @@ func TestVerify(t *testing.T) { }, } - err = prepareTestCase(beatSpec, version, config) - require.NoError(t, err) + pgpKey := prepareTestCase(t, beatSpec, version, config) testClient := NewDownloader(config) - artifact, err := testClient.Download(context.Background(), beatSpec, version) - require.NoError(t, err) + artifactPath, err := testClient.Download(context.Background(), beatSpec, version) + require.NoError(t, err, "fs.Downloader could not download artifacts") + _, err = testClient.DownloadAsc(context.Background(), beatSpec, version) + require.NoError(t, err, "fs.Downloader could not download artifacts .asc file") - t.Cleanup(func() { - os.Remove(artifact) - os.Remove(artifact + ".sha512") - os.RemoveAll(config.DropPath) - }) - - _, err = os.Stat(artifact) + _, err = os.Stat(artifactPath) require.NoError(t, err) - testVerifier, err := NewVerifier(log, config, true, nil) + testVerifier, err := NewVerifier(log, config, pgpKey) require.NoError(t, err) err = testVerifier.Verify(beatSpec, version, false, tc.RemotePGPUris...) @@ -229,25 +241,40 @@ func TestVerify(t *testing.T) { } } -func prepareTestCase(a artifact.Artifact, version string, cfg *artifact.Config) error { +// prepareTestCase creates an artifact file, defined by 'a' and 'version', +// its corresponding checksum (.sha512) and signature (.asc) files. +// It creates the necessary key to sing the artifact and returns the public key +// to verify the signature. 
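prepareTestCase, defined below, now signs the generated artifact with a throwaway key from testing/pgptest and returns the public key for the verifier. For readers unfamiliar with detached ASCII-armored signatures, here is a self-contained round trip built only on golang.org/x/crypto/openpgp; it mirrors what the test helper and download.VerifyPGPSignature do, but it is a sketch, not the project's code:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/openpgp"       //nolint:staticcheck // fine for a sketch
	"golang.org/x/crypto/openpgp/armor" //nolint:staticcheck // fine for a sketch
)

func main() {
	data := []byte("sample content")

	// Create a throwaway key pair (private and public halves live in the entity).
	entity, err := openpgp.NewEntity("test-only", "", "", nil)
	if err != nil {
		panic(err)
	}

	// Export the ASCII-armored public key.
	var pub bytes.Buffer
	w, err := armor.Encode(&pub, openpgp.PublicKeyType, nil)
	if err != nil {
		panic(err)
	}
	if err := entity.Serialize(w); err != nil {
		panic(err)
	}
	w.Close()

	// Produce a detached, ASCII-armored signature of the data.
	var asc bytes.Buffer
	if err := openpgp.ArmoredDetachSign(&asc, entity, bytes.NewReader(data), nil); err != nil {
		panic(err)
	}

	// Verification side: load the key ring and check the detached signature.
	keyring, err := openpgp.ReadArmoredKeyRing(&pub)
	if err != nil {
		panic(err)
	}
	if _, err := openpgp.CheckArmoredDetachedSignature(keyring, bytes.NewReader(data), &asc); err != nil {
		panic(err)
	}
	fmt.Println("signature verified")
}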
+func prepareTestCase( + t *testing.T, + a artifact.Artifact, + version string, + cfg *artifact.Config) []byte { + filename, err := artifact.GetArtifactName(a, version, cfg.OperatingSystem, cfg.Architecture) - if err != nil { - return err - } + require.NoErrorf(t, err, "could not get artifact name") - if err := os.MkdirAll(cfg.DropPath, 0777); err != nil { - return err - } + err = os.MkdirAll(cfg.DropPath, 0777) + require.NoErrorf(t, err, "failed creating directory %q", cfg.DropPath) + + filePath := filepath.Join(cfg.DropPath, filename) + filePathSHA := filePath + ".sha512" + filePathASC := filePath + ".asc" content := []byte("sample content") + err = os.WriteFile(filePath, content, 0644) + require.NoErrorf(t, err, "could not write %q file", filePath) + hash := sha512.Sum512(content) hashContent := fmt.Sprintf("%x %s", hash, filename) + err = os.WriteFile(filePathSHA, []byte(hashContent), 0644) + require.NoErrorf(t, err, "could not write %q file", filePathSHA) - if err := ioutil.WriteFile(filepath.Join(cfg.DropPath, filename), content, 0644); err != nil { - return err - } + pub, sig := pgptest.Sing(t, bytes.NewReader(content)) + err = os.WriteFile(filePathASC, sig, 0644) + require.NoErrorf(t, err, "could not write %q file", filePathASC) - return ioutil.WriteFile(filepath.Join(cfg.DropPath, filename+".sha512"), []byte(hashContent), 0644) + return pub } func assertFileExists(t testing.TB, path string) { diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go new file mode 100644 index 00000000000..cfc899420c2 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go @@ -0,0 +1,121 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
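The shared helpers in the new common_test.go below let any URL under artifacts.elastic.co resolve to a local httptest server by overriding the transport's DialContext. Stripped down to the bare pattern, with a placeholder handler and URL, the idea looks like this sketch:

package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httptest"
)

func main() {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "served %s locally", r.URL.Path)
	}))
	defer server.Close()

	// Every dial, regardless of the host in the URL, is routed to the test listener.
	client := http.Client{
		Transport: &http.Transport{
			DialContext: func(_ context.Context, network, _ string) (net.Conn, error) {
				return net.Dial(network, server.Listener.Addr().String())
			},
		},
	}

	resp, err := client.Get("http://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.5.1-linux-x86_64.tar.gz")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}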
+ +package http + +import ( + "bytes" + "context" + "crypto/sha512" + "fmt" + "net" + "net/http" + "net/http/httptest" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/testing/pgptest" +) + +const ( + version = "7.5.1" + sourcePattern = "/downloads/beats/filebeat/" + source = "http://artifacts.elastic.co/downloads/" +) + +var ( + beatSpec = artifact.Artifact{ + Name: "filebeat", + Cmd: "filebeat", + Artifact: "beats/filebeat", + } +) + +type testCase struct { + system string + arch string +} + +func getTestCases() []testCase { + // always test random package to save time + return []testCase{ + {"linux", "32"}, + {"linux", "64"}, + {"linux", "arm64"}, + {"darwin", "32"}, + {"darwin", "64"}, + {"windows", "32"}, + {"windows", "64"}, + } +} + +func getElasticCoServer(t *testing.T) (*httptest.Server, []byte) { + correctValues := map[string]struct{}{ + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i386.deb"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "amd64.deb"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i686.rpm"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "x86_64.rpm"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-arm64.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86_64.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86.zip"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86_64.zip"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "darwin-x86_64.tar.gz"): {}, + } + var resp []byte + content := []byte("anything will do") + hash := sha512.Sum512(content) + pub, sig := pgptest.Sing(t, bytes.NewReader(content)) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + packageName := r.URL.Path[len(sourcePattern):] + + ext := filepath.Ext(packageName) + if ext == ".gz" { + ext = ".tar.gz" + } + packageName = strings.TrimSuffix(packageName, ext) + + switch ext { + case ".sha512": + resp = []byte(fmt.Sprintf("%x %s", hash, packageName)) + case ".asc": + resp = sig + case ".tar.gz", ".zip", ".deb", ".rpm": + packageName += ext + resp = content + default: + w.WriteHeader(http.StatusNotFound) + t.Errorf("mock elastic.co server: unknown file extension: %q", ext) + return + } + + if _, ok := correctValues[packageName]; !ok { + t.Errorf("mock elastic.co server: invalid package name: %q", packageName) + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte{}) + return + } + + _, err := w.Write(resp) + assert.NoErrorf(t, err, "mock elastic.co server: failes writing response") + }) + + return httptest.NewServer(handler), pub +} + +func getElasticCoClient(server *httptest.Server) http.Client { + return http.Client{ + Transport: &http.Transport{ + DialContext: func(_ context.Context, network, s string) (net.Conn, error) { + _ = s + return net.Dial(network, server.Listener.Addr().String()) + }, + }, + } +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go index 7be3ae1066f..a02585e5938 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go @@ -18,7 +18,6 @@ import ( "time" 
"github.com/elastic/elastic-agent-libs/transport/httpcommon" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go index 119173e1344..4c341a2aa5e 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go @@ -30,6 +30,48 @@ import ( "github.com/elastic/elastic-agent-libs/transport/httpcommon" ) +func TestDownload(t *testing.T) { + targetDir, err := ioutil.TempDir(os.TempDir(), "") + if err != nil { + t.Fatal(err) + } + + log, _ := logger.New("", false) + timeout := 30 * time.Second + testCases := getTestCases() + server, _ := getElasticCoServer(t) + elasticClient := getElasticCoClient(server) + + config := &artifact.Config{ + SourceURI: source, + TargetDirectory: targetDir, + HTTPTransportSettings: httpcommon.HTTPTransportSettings{ + Timeout: timeout, + }, + } + + for _, testCase := range testCases { + testName := fmt.Sprintf("%s-binary-%s", testCase.system, testCase.arch) + t.Run(testName, func(t *testing.T) { + config.OperatingSystem = testCase.system + config.Architecture = testCase.arch + + testClient := NewDownloaderWithClient(log, config, elasticClient) + artifactPath, err := testClient.Download(context.Background(), beatSpec, version) + if err != nil { + t.Fatal(err) + } + + _, err = os.Stat(artifactPath) + if err != nil { + t.Fatal(err) + } + + os.Remove(artifactPath) + }) + } +} + func TestDownloadBodyError(t *testing.T) { // This tests the scenario where the download encounters a network error // part way through the download, while copying the response body. diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go deleted file mode 100644 index bd1564cab2b..00000000000 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package http - -import ( - "context" - "crypto/sha512" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/http/httptest" - "os" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent-libs/transport/httpcommon" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - version = "7.5.1" - sourcePattern = "/downloads/beats/filebeat/" - source = "http://artifacts.elastic.co/downloads/" -) - -var ( - beatSpec = artifact.Artifact{ - Name: "filebeat", - Cmd: "filebeat", - Artifact: "beats/filebeat", - } -) - -type testCase struct { - system string - arch string -} - -func TestDownload(t *testing.T) { - targetDir, err := ioutil.TempDir(os.TempDir(), "") - if err != nil { - t.Fatal(err) - } - - log, _ := logger.New("", false) - timeout := 30 * time.Second - testCases := getTestCases() - elasticClient := getElasticCoClient() - - config := &artifact.Config{ - SourceURI: source, - TargetDirectory: targetDir, - HTTPTransportSettings: httpcommon.HTTPTransportSettings{ - Timeout: timeout, - }, - } - - for _, testCase := range testCases { - testName := fmt.Sprintf("%s-binary-%s", testCase.system, testCase.arch) - t.Run(testName, func(t *testing.T) { - config.OperatingSystem = testCase.system - config.Architecture = testCase.arch - - testClient := NewDownloaderWithClient(log, config, elasticClient) - artifactPath, err := testClient.Download(context.Background(), beatSpec, version) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(artifactPath) - if err != nil { - t.Fatal(err) - } - - os.Remove(artifactPath) - }) - } -} - -func TestVerify(t *testing.T) { - targetDir, err := ioutil.TempDir(os.TempDir(), "") - if err != nil { - t.Fatal(err) - } - - log, _ := logger.New("", false) - timeout := 30 * time.Second - testCases := getRandomTestCases() - elasticClient := getElasticCoClient() - - config := &artifact.Config{ - SourceURI: source, - TargetDirectory: targetDir, - HTTPTransportSettings: httpcommon.HTTPTransportSettings{ - Timeout: timeout, - }, - } - - for _, testCase := range testCases { - testName := fmt.Sprintf("%s-binary-%s", testCase.system, testCase.arch) - t.Run(testName, func(t *testing.T) { - config.OperatingSystem = testCase.system - config.Architecture = testCase.arch - - testClient := NewDownloaderWithClient(log, config, elasticClient) - artifact, err := testClient.Download(context.Background(), beatSpec, version) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(artifact) - if err != nil { - t.Fatal(err) - } - - testVerifier, err := NewVerifier(log, config, true, nil) - if err != nil { - t.Fatal(err) - } - - err = testVerifier.Verify(beatSpec, version, false) - require.NoError(t, err) - - os.Remove(artifact) - os.Remove(artifact + ".sha512") - }) - } -} - -func getTestCases() []testCase { - // always test random package to save time - return []testCase{ - {"linux", "32"}, - {"linux", "64"}, - {"linux", "arm64"}, - {"darwin", "32"}, - {"darwin", "64"}, - {"windows", "32"}, - {"windows", "64"}, - } -} - -func getRandomTestCases() []testCase { - tt := getTestCases() - - rand.Seed(time.Now().UnixNano()) - first := rand.Intn(len(tt)) - second := rand.Intn(len(tt)) - - return []testCase{ - tt[first], - tt[second], - } -} - -func getElasticCoClient() http.Client { - correctValues := map[string]struct{}{ - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i386.deb"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, 
"amd64.deb"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i686.rpm"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "x86_64.rpm"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86.tar.gz"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-arm64.tar.gz"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86_64.tar.gz"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86.zip"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86_64.zip"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "darwin-x86_64.tar.gz"): {}, - } - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - packageName := r.URL.Path[len(sourcePattern):] - isShaReq := strings.HasSuffix(packageName, ".sha512") - packageName = strings.TrimSuffix(packageName, ".sha512") - - if _, ok := correctValues[packageName]; !ok { - w.WriteHeader(http.StatusInternalServerError) - } - - content := []byte(packageName) - if isShaReq { - hash := sha512.Sum512(content) - _, err := w.Write([]byte(fmt.Sprintf("%x %s", hash, packageName))) - if err != nil { - panic(err) - } - } else { - _, err := w.Write(content) - if err != nil { - panic(err) - } - } - }) - server := httptest.NewServer(handler) - - return http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, network, _ string) (net.Conn, error) { - return net.Dial(network, server.Listener.Addr().String()) - }, - }, - } -} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go index 99fca9f65d4..50aa64fab1e 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go @@ -30,11 +30,10 @@ const ( // Verifier verifies a downloaded package by comparing with public ASC // file from elastic.co website. type Verifier struct { - config *artifact.Config - client http.Client - pgpBytes []byte - allowEmptyPgp bool - log *logger.Logger + config *artifact.Config + client http.Client + defaultKey []byte + log *logger.Logger } func (v *Verifier) Name() string { @@ -43,9 +42,9 @@ func (v *Verifier) Name() string { // NewVerifier create a verifier checking downloaded package on preconfigured // location against a key stored on elastic.co website. -func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Verifier, error) { - if len(pgp) == 0 && !allowEmptyPgp { - return nil, errors.New("expecting PGP but retrieved none", errors.TypeSecurity) +func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte) (*Verifier, error) { + if len(pgp) == 0 { + return nil, errors.New("expecting PGP key received none", errors.TypeSecurity) } client, err := config.HTTPTransportSettings.Client( @@ -59,11 +58,10 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool } v := &Verifier{ - config: config, - client: *client, - allowEmptyPgp: allowEmptyPgp, - pgpBytes: pgp, - log: log, + config: config, + client: *client, + defaultKey: pgp, + log: log, } return v, nil @@ -90,24 +88,26 @@ func (v *Verifier) Reload(c *artifact.Config) error { // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. 
func (v *Verifier) Verify(a artifact.Artifact, version string, skipDefaultPgp bool, pgpBytes ...string) error { - fullPath, err := artifact.GetArtifactPath(a, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory) + artifactPath, err := artifact.GetArtifactPath(a, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory) if err != nil { return errors.New(err, "retrieving package path") } - if err = download.VerifySHA512Hash(fullPath); err != nil { - var checksumMismatchErr *download.ChecksumMismatchError - if errors.As(err, &checksumMismatchErr) { - os.Remove(fullPath) - os.Remove(fullPath + ".sha512") - } - return err + if err = download.VerifySHA512HashWithCleanup(v.log, artifactPath); err != nil { + return fmt.Errorf("failed to verify SHA512 hash: %w", err) } if err = v.verifyAsc(a, version, skipDefaultPgp, pgpBytes...); err != nil { var invalidSignatureErr *download.InvalidSignatureError if errors.As(err, &invalidSignatureErr) { - os.Remove(fullPath + ".asc") + if err := os.Remove(artifactPath); err != nil { + v.log.Warnf("failed clean up after signature verification: failed to remove %q: %v", + artifactPath, err) + } + if err := os.Remove(artifactPath + ascSuffix); err != nil { + v.log.Warnf("failed clean up after sha512 check: failed to remove %q: %v", + artifactPath+ascSuffix, err) + } } return err } @@ -115,36 +115,7 @@ func (v *Verifier) Verify(a artifact.Artifact, version string, skipDefaultPgp bo return nil } -func (v *Verifier) verifyAsc(a artifact.Artifact, version string, skipDefaultPgp bool, pgpSources ...string) error { - var pgpBytes [][]byte - if len(v.pgpBytes) > 0 && !skipDefaultPgp { - v.log.Infof("Default PGP being appended") - pgpBytes = append(pgpBytes, v.pgpBytes) - } - - for _, check := range pgpSources { - if len(check) == 0 { - continue - } - raw, err := download.PgpBytesFromSource(v.log, check, &v.client) - if err != nil { - return err - } - - if len(raw) == 0 { - continue - } - - pgpBytes = append(pgpBytes, raw) - } - - if len(pgpBytes) == 0 { - // no pgp available skip verification process - v.log.Infof("No checks defined") - return nil - } - v.log.Infof("Using %d PGP keys", len(pgpBytes)) - +func (v *Verifier) verifyAsc(a artifact.Artifact, version string, skipDefaultKey bool, pgpSources ...string) error { filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch()) if err != nil { return errors.New(err, "retrieving package name") @@ -161,27 +132,17 @@ func (v *Verifier) verifyAsc(a artifact.Artifact, version string, skipDefaultPgp } ascBytes, err := v.getPublicAsc(ascURI) - if err != nil && v.allowEmptyPgp { - // asc not available but we allow empty for dev use-case - return nil - } else if err != nil { + if err != nil { return errors.New(err, fmt.Sprintf("fetching asc file from %s", ascURI), errors.TypeNetwork, errors.M(errors.MetaKeyURI, ascURI)) } - for i, check := range pgpBytes { - err = download.VerifyGPGSignature(fullPath, ascBytes, check) - if err == nil { - // verify successful - v.log.Infof("Verification with PGP[%d] successful", i) - return nil - } - v.log.Warnf("Verification with PGP[%d] failed: %v", i, err) + pgpBytes, err := download.FetchPGPKeys( + v.log, v.client, v.defaultKey, skipDefaultKey, pgpSources) + if err != nil { + return fmt.Errorf("could not fetch pgp keys: %w", err) } - v.log.Warnf("Verification failed") - - // return last error - return err + return download.VerifyPGPSignatureWithKeys(v.log, fullPath, ascBytes, pgpBytes) } func (v *Verifier) composeURI(filename, artifactName string) 
(string, error) { @@ -204,11 +165,12 @@ func (v *Verifier) composeURI(filename, artifactName string) (string, error) { func (v *Verifier) getPublicAsc(sourceURI string) ([]byte, error) { ctx, cancelFn := context.WithTimeout(context.Background(), 30*time.Second) defer cancelFn() - // Change NewRequest to NewRequestWithContext and pass context it req, err := http.NewRequestWithContext(ctx, http.MethodGet, sourceURI, nil) if err != nil { return nil, errors.New(err, "failed create request for loading public key", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } + + // TODO: receive a http.Client resp, err := http.DefaultClient.Do(req) if err != nil { return nil, errors.New(err, "failed loading public key", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go new file mode 100644 index 00000000000..c8f3405f404 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go @@ -0,0 +1,81 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package http + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-libs/transport/httpcommon" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +func TestVerify(t *testing.T) { + targetDir := t.TempDir() + + log, _ := logger.New("", false) + timeout := 30 * time.Second + testCases := getRandomTestCases() + server, pub := getElasticCoServer(t) + elasticClient := getElasticCoClient(server) + // artifact/download/http.Verifier uses http.DefaultClient, thus we need to + // change it. 
+ http.DefaultClient = &elasticClient + + config := &artifact.Config{ + SourceURI: source, + TargetDirectory: targetDir, + HTTPTransportSettings: httpcommon.HTTPTransportSettings{ + Timeout: timeout, + }, + } + + for _, testCase := range testCases { + testName := fmt.Sprintf("%s-binary-%s", testCase.system, testCase.arch) + t.Run(testName, func(t *testing.T) { + config.OperatingSystem = testCase.system + config.Architecture = testCase.arch + + testClient := NewDownloaderWithClient(log, config, elasticClient) + artifact, err := testClient.Download(context.Background(), beatSpec, version) + if err != nil { + t.Fatal(err) + } + + _, err = os.Stat(artifact) + if err != nil { + t.Fatal(err) + } + + testVerifier, err := NewVerifier(log, config, pub) + if err != nil { + t.Fatal(err) + } + + err = testVerifier.Verify(beatSpec, version, false) + require.NoError(t, err) + }) + } +} + +func getRandomTestCases() []testCase { + tt := getTestCases() + + first := rand.Intn(len(tt)) + second := rand.Intn(len(tt)) + + return []testCase{ + tt[first], + tt[second], + } +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go index fc2c3a806be..c92b01b207c 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go @@ -17,10 +17,10 @@ import ( // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. -func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte) (download.Verifier, error) { +func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte) (download.Verifier, error) { verifiers := make([]download.Verifier, 0, 3) - fsVer, err := fs.NewVerifier(log, config, allowEmptyPgp, pgp) + fsVer, err := fs.NewVerifier(log, config, pgp) if err != nil { return nil, err } @@ -30,7 +30,7 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool // useful for testing with a snapshot version of fleet for example // try snapshot repo before official if release.Snapshot() { - snapshotVerifier, err := snapshot.NewVerifier(log, config, allowEmptyPgp, pgp, nil) + snapshotVerifier, err := snapshot.NewVerifier(log, config, pgp, nil) if err != nil { log.Error(err) } else { @@ -38,7 +38,7 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool } } - remoteVer, err := http.NewVerifier(log, config, allowEmptyPgp, pgp) + remoteVer, err := http.NewVerifier(log, config, pgp) if err != nil { return nil, err } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go index 302aa93e766..060c5e9fa10 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go @@ -24,12 +24,12 @@ func (v *Verifier) Name() string { // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
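localremote.NewVerifier above now builds the fs, snapshot, and http verifiers without the allowEmptyPgp flag. How those verifiers are combined is not part of this diff, so the following is purely an illustration of a try-each-in-order composition with invented names, not the package's actual implementation:

package example

import (
	"errors"
	"fmt"

	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
)

// tryVerifiers is a hypothetical helper: it runs each verifier in order,
// stops at the first success, and otherwise collects every failure.
func tryVerifiers(verifiers []download.Verifier, a artifact.Artifact, version string) error {
	var errs error
	for _, v := range verifiers {
		if err := v.Verify(a, version, false); err != nil {
			errs = errors.Join(errs, fmt.Errorf("%s: %w", v.Name(), err))
			continue
		}
		return nil
	}
	return fmt.Errorf("all verifiers failed: %w", errs)
}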
-func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte, versionOverride *agtversion.ParsedSemVer) (download.Verifier, error) { +func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte, versionOverride *agtversion.ParsedSemVer) (download.Verifier, error) { cfg, err := snapshotConfig(config, versionOverride) if err != nil { return nil, err } - v, err := http.NewVerifier(log, cfg, allowEmptyPgp, pgp) + v, err := http.NewVerifier(log, cfg, pgp) if err != nil { return nil, errors.New(err, "failed to create snapshot verifier") } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go index 662367f4909..79fc2348711 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go @@ -21,7 +21,6 @@ import ( "time" "github.com/hashicorp/go-multierror" - "golang.org/x/crypto/openpgp" //nolint:staticcheck // crypto/openpgp is only receiving security updates. "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" @@ -39,12 +38,18 @@ var ( ErrUnknownPGPSource = errors.New("unknown pgp source") ) -// warnLogger is a logger that only needs to implement Warnf, as that is the only functions -// that the downloadProgressReporter uses. +// warnLogger is a logger that only needs to implement Warnf. type warnLogger interface { Warnf(format string, args ...interface{}) } +// loggerInfofWarnf is a logger that only needs to implement Infof and Warnf. +type infoWarnLogger interface { + warnLogger + + Infof(format string, args ...interface{}) +} + // ChecksumMismatchError indicates the expected checksum for a file does not // match the computed checksum. type ChecksumMismatchError struct { @@ -70,18 +75,40 @@ func (e *InvalidSignatureError) Error() string { // Unwrap returns the cause. func (e *InvalidSignatureError) Unwrap() error { return e.Err } -// Verifier is an interface verifying the SHA512 checksum and GPG signature and +// Verifier is an interface verifying the SHA512 checksum and PGP signature and // of a downloaded artifact. type Verifier interface { Name() string - // Verify should verify the artifact and return if succeed status (true|false) and an error if any checks fail. - // If the checksum does no match Verify returns a - // *download.ChecksumMismatchError. And if the GPG signature is invalid then - // Verify returns a *download.InvalidSignatureError. Use errors.As() to - // check error types. + // Verify should verify the artifact, returning an error if any checks fail. + // If the checksum does no match Verify returns a *download.ChecksumMismatchError. + // If the PGP signature check fails then Verify returns a + // *download.InvalidSignatureError. Verify(a artifact.Artifact, version string, skipDefaultPgp bool, pgpBytes ...string) error } +// VerifySHA512HashWithCleanup calls VerifySHA512Hash and, in case of a +// *ChecksumMismatchError, performs a cleanup by deleting both the filename and +// filename.sha512 files. If the cleanup fails, it logs a warning. 
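The helper defined next wraps the plain checksum check with cleanup of the artifact and its sidecar. The underlying check itself amounts to parsing the sidecar and comparing it against a streamed SHA-512 of the file, roughly as in this sketch (the real logic lives in VerifySHA512Hash; error handling here is minimal):

package example

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// checkSidecar is a rough sketch: parse "<hex digest> <file name>" from
// <path>.sha512, stream-hash <path>, and compare the two.
func checkSidecar(path string) error {
	raw, err := os.ReadFile(path + ".sha512")
	if err != nil {
		return fmt.Errorf("could not read checksum file: %w", err)
	}

	fields := strings.Fields(strings.SplitN(string(raw), "\n", 2)[0])
	if len(fields) < 2 || fields[1] != filepath.Base(path) {
		return fmt.Errorf("no checksum entry for %q", filepath.Base(path))
	}
	expected := fields[0]

	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha512.New()
	if _, err := io.Copy(h, f); err != nil {
		return fmt.Errorf("failed to read file to calculate hash: %w", err)
	}

	if computed := hex.EncodeToString(h.Sum(nil)); computed != expected {
		return fmt.Errorf("checksum mismatch: expected %s, computed %s", expected, computed)
	}
	return nil
}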
+func VerifySHA512HashWithCleanup(log infoWarnLogger, filename string) error { + if err := VerifySHA512Hash(filename); err != nil { + var checksumMismatchErr *ChecksumMismatchError + if errors.As(err, &checksumMismatchErr) { + if err := os.Remove(filename); err != nil { + log.Warnf("failed clean up after sha512 verification: failed to remove %q: %v", + filename, err) + } + if err := os.Remove(filename + ".sha512"); err != nil { + log.Warnf("failed clean up after sha512 check: failed to remove %q: %v", + filename+".sha512", err) + } + } + + return err + } + + return nil +} + // VerifySHA512Hash checks that a sidecar file containing a sha512 checksum // exists and that the checksum in the sidecar file matches the checksum of // the file. It returns an error if validation fails. @@ -89,7 +116,7 @@ func VerifySHA512Hash(filename string) error { // Read expected checksum. expectedHash, err := readChecksumFile(filename+".sha512", filepath.Base(filename)) if err != nil { - return err + return fmt.Errorf("could not read checksum file: %w", err) } // Compute sha512 checksum. @@ -101,10 +128,10 @@ func VerifySHA512Hash(filename string) error { hash := sha512.New() if _, err := io.Copy(hash, f); err != nil { - return err + return fmt.Errorf("faled to read file to calculate hash") } - computedHash := hex.EncodeToString(hash.Sum(nil)) + computedHash := hex.EncodeToString(hash.Sum(nil)) if computedHash != expectedHash { return &ChecksumMismatchError{ Expected: expectedHash, @@ -157,11 +184,27 @@ func readChecksumFile(checksumFile, filename string) (string, error) { return checksum, nil } -// VerifyGPGSignature verifies the GPG signature of a file. It accepts the path +func VerifyPGPSignatureWithKeys( + log infoWarnLogger, file string, asciiArmorSignature []byte, publicKeys [][]byte) error { + var err error + for i, key := range publicKeys { + err = VerifyPGPSignature(file, asciiArmorSignature, key) + if err == nil { + log.Infof("Verification with PGP[%d] successful", i) + return nil + } + log.Warnf("Verification with PGP[%d] failed: %v", i, err) + } + + log.Warnf("Verification failed: %v", err) + return fmt.Errorf("could not verify PGP signature of %q: %w", file, err) +} + +// VerifyPGPSignature verifies the GPG signature of a file. It accepts the path // to the file to verify, the ASCII armored signature, and the public key to // check against. If there is a problem with the signature then a // *download.InvalidSignatureError is returned. 
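VerifyPGPSignatureWithKeys above tries each candidate key in turn, while FetchPGPKeys further below assembles that candidate list from the embedded default key plus any configured sources. A caller combining the two could look like this sketch; the paths, client, and key material are placeholders:

package example

import (
	"fmt"
	"net/http"
	"os"

	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
	"github.com/elastic/elastic-agent/pkg/core/logger"
)

// verifyWithConfiguredKeys is illustrative only; the artifact path and the
// extra PGP sources are assumed to come from configuration.
func verifyWithConfiguredKeys(log *logger.Logger, artifactPath string, defaultKey []byte, extraSources []string) error {
	keys, err := download.FetchPGPKeys(log, http.Client{}, defaultKey, false, extraSources)
	if err != nil {
		return fmt.Errorf("could not fetch pgp keys: %w", err)
	}
	if len(keys) == 0 {
		// FetchPGPKeys returns an empty set when no key is available; the
		// code above treats that as "skip verification".
		return nil
	}

	asc, err := os.ReadFile(artifactPath + ".asc")
	if err != nil {
		return fmt.Errorf("could not read detached signature: %w", err)
	}

	return download.VerifyPGPSignatureWithKeys(log, artifactPath, asc, keys)
}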
-func VerifyGPGSignature(file string, asciiArmorSignature, publicKey []byte) error { +func VerifyPGPSignature(file string, asciiArmorSignature, publicKey []byte) error { keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(publicKey)) if err != nil { return errors.New(err, "read armored key ring", errors.TypeSecurity) @@ -181,6 +224,39 @@ func VerifyGPGSignature(file string, asciiArmorSignature, publicKey []byte) erro return nil } +func FetchPGPKeys(log infoWarnLogger, client http.Client, defaultPGPKey []byte, skipDefaultPGP bool, pgpSources []string) ([][]byte, error) { + var pgpKeys [][]byte + if len(defaultPGPKey) > 0 && !skipDefaultPGP { + pgpKeys = append(pgpKeys, defaultPGPKey) + log.Infof("Default PGP appended") + } + + for _, check := range pgpSources { + if len(check) == 0 { + continue + } + + raw, err := PgpBytesFromSource(log, check, &client) + if err != nil { + return nil, err + } + + if len(raw) == 0 { + continue + } + + pgpKeys = append(pgpKeys, raw) + } + + if len(pgpKeys) == 0 { + log.Infof("No PGP key available, skipping verification process") + return nil, nil + } + + log.Infof("Using %d PGP keys", len(pgpKeys)) + return pgpKeys, nil +} + func PgpBytesFromSource(log warnLogger, source string, client HTTPClient) ([]byte, error) { if strings.HasPrefix(source, PgpSourceRawPrefix) { return []byte(strings.TrimPrefix(source, PgpSourceRawPrefix)), nil @@ -191,7 +267,8 @@ func PgpBytesFromSource(log warnLogger, source string, client HTTPClient) ([]byt if errors.Is(err, ErrRemotePGPDownloadFailed) || errors.Is(err, ErrInvalidLocation) { log.Warnf("Skipped remote PGP located at %q because it's unavailable: %v", strings.TrimPrefix(source, PgpSourceURIPrefix), err) } else if err != nil { - log.Warnf("Failed to fetch remote PGP") + log.Warnf("Failed to fetch remote PGP key from %q: %v", + strings.TrimPrefix(source, PgpSourceURIPrefix), err) } return pgpBytes, nil diff --git a/internal/pkg/agent/application/upgrade/artifact/download/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/verifier_test.go index 05ad9a96b91..6e12a4b3d98 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/verifier_test.go @@ -6,10 +6,16 @@ package download import ( "bytes" + "crypto/sha512" + "encoding/hex" + "fmt" "io" "net/http" + "os" + "path/filepath" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" @@ -118,6 +124,85 @@ func TestPgpBytesFromSource(t *testing.T) { } } +func TestVerifySHA512HashWithCleanup_success(t *testing.T) { + data := "I’m the Doctor. I’m a Time Lord. I’m from the planet " + + "Gallifrey in the constellation of Kasterborous. I’m 903 years old and " + + "I’m the man who’s going to save your lives and all 6 billion people on " + + "the planet below. You got a problem with that?" 
+ dir := t.TempDir() + filename := "file" + path := filepath.Join(dir, filename) + + f, err := os.Create(path) + require.NoError(t, err, "could not create file") + fsha512, err := os.Create(path + ".sha512") + require.NoError(t, err, "could not create .sha512 file") + + _, err = fmt.Fprint(f, data) + require.NoError(t, err, "could not write to file") + hash := sha512.Sum512([]byte(data)) + _, err = fmt.Fprintf(fsha512, "%s %s", hex.EncodeToString(hash[:]), filename) + require.NoError(t, err, "could not write to file") + + err = f.Close() + require.NoError(t, err, "could not close file") + err = fsha512.Close() + require.NoError(t, err, "could not close .sha512 file") + + err = VerifySHA512HashWithCleanup(testlogger{t: t}, path) + assert.NoErrorf(t, err, "failed verifying sha512") +} + +func TestVerifySHA512HashWithCleanup_failure(t *testing.T) { + data := "I’m the Doctor. I’m a Time Lord. I’m from the planet " + + "Gallifrey in the constellation of Kasterborous. I’m 903 years old and " + + "I’m the man who’s going to save your lives and all 6 billion people on " + + "the planet below. You got a problem with that?" + dir := t.TempDir() + filename := "file" + path := filepath.Join(dir, filename) + + f, err := os.Create(path) + require.NoError(t, err, "could not create file") + fsha512, err := os.Create(path + ".sha512") + require.NoError(t, err, "could not create .sha512 file") + + _, err = fmt.Fprint(f, data) + require.NoError(t, err, "could not write to file") + _, err = fmt.Fprintf(fsha512, "%s %s", "wrong-sha512", filename) + require.NoError(t, err, "could not write to file") + + err = f.Close() + require.NoError(t, err, "could not close file") + err = fsha512.Close() + require.NoError(t, err, "could not close .sha512 file") + + err = VerifySHA512HashWithCleanup(testlogger{t: t}, path) + assert.Errorf(t, err, "checksum verification should have failed") + + dirEntries, err := os.ReadDir(dir) + require.NoError(t, err, "could not read %q to check it's empty", dir) + if len(dirEntries) != 0 { + var files []string + for _, e := range dirEntries { + files = append(files, e.Name()) + } + + t.Errorf("there should be no files on %q. 
Found %v", dir, files) + } +} + +type testlogger struct { + t *testing.T +} + +func (l testlogger) Infof(format string, args ...interface{}) { + l.t.Logf("[INFO] "+format, args) +} +func (l testlogger) Warnf(format string, args ...interface{}) { + l.t.Logf("[WARN] "+format, args) +} + type MockClient struct { DoFunc func(req *http.Request) (*http.Response, error) } diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index a273460f337..cee0c1d75dc 100644 --- a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -143,23 +143,23 @@ func newDownloader(version *agtversion.ParsedSemVer, log *logger.Logger, setting } func newVerifier(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config) (download.Verifier, error) { - allowEmptyPgp, pgp := release.PGP() + pgp := release.PGP() if !version.IsSnapshot() { - return localremote.NewVerifier(log, settings, allowEmptyPgp, pgp) + return localremote.NewVerifier(log, settings, pgp) } - fsVerifier, err := fs.NewVerifier(log, settings, allowEmptyPgp, pgp) + fsVerifier, err := fs.NewVerifier(log, settings, pgp) if err != nil { return nil, err } - snapshotVerifier, err := snapshot.NewVerifier(log, settings, allowEmptyPgp, pgp, version) + snapshotVerifier, err := snapshot.NewVerifier(log, settings, pgp, version) if err != nil { return nil, err } - remoteVerifier, err := http.NewVerifier(log, settings, allowEmptyPgp, pgp) + remoteVerifier, err := http.NewVerifier(log, settings, pgp) if err != nil { return nil, err } diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index 91d470ac00b..319c0954b55 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -217,10 +217,6 @@ func run(override cfgOverrider, testingMode bool, fleetInitTimeout time.Duration l.Error(errors.New(err, "failed to invoke rollback watcher")) } - if allowEmptyPgp, _ := release.PGP(); allowEmptyPgp { - l.Info("Elastic Agent has been built with security disabled. 
Elastic Agent will not verify signatures of upgrade artifact.") - } - execPath, err := reexecPath() if err != nil { return err diff --git a/internal/pkg/release/pgp.go b/internal/pkg/release/pgp.go index 49d4cfc32a9..9644f6778f3 100644 --- a/internal/pkg/release/pgp.go +++ b/internal/pkg/release/pgp.go @@ -19,7 +19,7 @@ func init() { pgpBytes = packer.MustUnpack("eJyMlsuOtLoBhPd5jH9/JBua0U+ksxguhobGjI0v4B1gNDQ2NNPNTNNEeffoRMoqipR1Va1KVfr+8Sv5SP7I4+aPwbaP7do/hvbej7/+/utSge1SwZBynbFrQCh8/+RIhCy20TnekKyCkMV+VL3+8oEttMr2C1475/R2jvW3FkF6TpvXZXr/Lhj5zGNdisovWunBITR5OENKuRdSY44qxT/E4ICiMdZJVlazd2pssJMJOTT2AHHx3iYclLVKZI1bNtmMwfWQdlz6SI9Vst6wwTkxJCfVdqVAfWjX3pqZuE1NDixX2lod6AN9FA6eZY0vRMJkqLagn3BRxRi3sDk6uB59vAYE0kwB/NKOd29l8VOSNRJyX7nkRzHRXRv/KlhG+UIJjtWjSNe6cdT1AouTEPZNwLGuuILVgrA23GMSVZKhq4Yi1Mv600vksFi34Xw7OGh2DoOPHNIQC/Sqku3F+Rj2DmxysJqGKYORfejo80dHKtIGugqiskuzx2DsRyk0z6Et8bKy3MV7lZC8EPZycZDCNbp1YC/b9N2jL/88JOPEoYpasO8lwkwnt13a284P+6V5Rjo4ykKsNZuzEzVeqoF/uwBPUWc39P32ah0YqgkrBfCu+P5WJejWA4T6VAUdGzmZY5czWxcTispUBcSIkHCUFigTYsFKxqdnI3Q52LWmi1XFrJfQbOh/aYJlj27Oti4V7cCzQtrx3sc2HpKtlWidmwmHg0Xpf+dNLlm2aHk+qtkjnbOFDCAqYkQrjjIFRSTkOqnaLpXMAg1tQSBaqhi5FdcJlzoiEP2wOAslx4EGIqoSjchzTXCMAuLoO2fBdyHhlc32g9dZNcxroAB6CCRoIeHSHfi7OlRI4XhtJ9oQvjparDE9gnPF91XU5N6j4HuYehjO3qGmoBTOuPOJehSoR1cHsHHpQhzIabidB2uBmOzcR3YXL/+qoYKVwaWOt6Y98KHSVRGeqRzaRxFjO1x/5xgUL+qKlAD/0cynZ5eKiFnikWXMu/kTdLzw2tCfKmCOgYkvbW+udpCjHQ6Kmtqe9ztGzZ1KP+HO6WA2uHRk5UOCYxGrl5g064QiQ4zDBqI3xd6fDVhvfZpdhmSD0kDCkdr7OGM5WGeKEBisoDT0Cw7U3MbjVylFnMPRhtNosIvdHI7OEJ9fDT8B5Xi8Q9lba557Z2nVR8GtuPoZA/reCztXvPd6qzcO9rJLxxuZMpA7xZMv+EvFGuPwd65E9uBIe12srsLACdfBRtMg02l2FxB/DyabWwc2LMIOO7KmgNrRDmR0trSZt5AiVHHgnTDYAi51gPkquvf1rQGwxpU/6dq2fBrHDuGxAdveInzVs8/6Gl0HM14aKKic4WMwHq6A5oRnCT2Qx5dPr79ublP5s14Qwcbnoc3euKGOsMVP49pJu8VJik+XJhR2TJ/KVGQlE9eytrao/GZg4ruDAerYONFwu/XJ/t0jfudxRqW5QRnfXlX1yAv59NhsT0NEXA2DVCYi6uJ9bxz6XSTIU+Hm5DBz1VT8SCTyflFTA/qXFKtoRQxaq9vW8U89iJ/YHU8YZF/5cy3++iRqthS7+MyNcpQIEsGClh8qp8unR8G6txaHjRkrPm9jVz2AkrAuXFErqGa5jFA7WBDntDe1LRXMQGi2lS6N11r0UkfGK6GRXlY1RBYMtebEHaccqIQJHXRJFg2zuXeM1jTcqBC6VobSInr3ROXXjAX3/ggu2u1zWmvWvnxBU8E7d0w7s4lippSILOAcha1ASJksVCD7a8c5sSj/T+9tijHh6Kcyoqwm23b1qtp5uxTv61QAmBFuUSnHgE/nZ1ejXIB96t2x0GlW9YeCzDS7WLInuT5Ad/0NsUACO/6JMLQKk31gYxPN6K5cnBQsy0NLRz17OeawaLjgXaIXicx9MNSqyR6DoEQmPm1c+hxmPDKr4k4Ko53fO3dsUUQYMulfu9h3hBtgfvVVkdxyvGQbBnbpDawFpw1FgdPBIG0NfBRLvLeC2oGPdyZWNAiRlwhfeged24h+icm6fWQvHS9O5PXwiKuxlluBn6vDmHDLBC/arD+k1kcf4aSbuEfqsdbJWBZwzKXE30wqppMt00IvfNKiWM73hp33AvWOnj0xIA46cHryBOJwxrBfsOKptWIKShn6Fy6FUNzDGmhOzdnhfGQXB33zGaVcCreV6723VnWQljpRjy4dM81sIyVciREVr37nvQjiLtZVF/uLsNSU9SiUGZPWXUPMsgkfAfogN79k2Y98v/2bbS5clDT8Pxjo888/f/3zb/8KAAD///dAGpU=")["GPG-KEY-elasticsearch"] } -// PGP return pgpbytes and a flag describing whether or not no pgp is valid. -func PGP() (bool, []byte) { - return allowEmptyPgp == "true", pgpBytes +// PGP return pgpbytes. +func PGP() []byte { + return pgpBytes } diff --git a/internal/pkg/release/version.go b/internal/pkg/release/version.go index 5ff0be1dc29..93f78e5f4be 100644 --- a/internal/pkg/release/version.go +++ b/internal/pkg/release/version.go @@ -23,10 +23,6 @@ var snapshot = "" // complete is an environment variable marking the image as complete. 
var complete = "ELASTIC_AGENT_COMPLETE" -// allowEmptyPgp is used as a debug flag and allows working -// without valid pgp -var allowEmptyPgp string - // allowUpgrade is used as a debug flag and allows working // with upgrade without requiring Agent to be installed correctly var allowUpgrade string diff --git a/testing/integration/upgrade_gpg_test.go b/testing/integration/upgrade_gpg_test.go index 56d4378d147..e2001dc1eca 100644 --- a/testing/integration/upgrade_gpg_test.go +++ b/testing/integration/upgrade_gpg_test.go @@ -11,7 +11,6 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/release" @@ -55,7 +54,7 @@ func TestStandaloneUpgradeWithGPGFallback(t *testing.T) { t.Logf("Testing Elastic Agent upgrade from %s to %s...", define.Version(), upgradeToVersion) - _, defaultPGP := release.PGP() + defaultPGP := release.PGP() firstSeven := string(defaultPGP[:7]) newPgp := strings.Replace( string(defaultPGP), @@ -73,7 +72,7 @@ func TestStandaloneUpgradeWithGPGFallback(t *testing.T) { upgradetest.WithSourceURI(""), upgradetest.WithCustomPGP(customPGP), upgradetest.WithSkipVerify(false)) - assert.NoError(t, err) + require.NoError(t, err, "perform upgrade failed") } func TestStandaloneUpgradeWithGPGFallbackOneRemoteFailing(t *testing.T) { @@ -110,7 +109,7 @@ func TestStandaloneUpgradeWithGPGFallbackOneRemoteFailing(t *testing.T) { t.Logf("Testing Elastic Agent upgrade from %s to %s...", define.Version(), upgradeToVersion) - _, defaultPGP := release.PGP() + defaultPGP := release.PGP() firstSeven := string(defaultPGP[:7]) newPgp := strings.Replace( string(defaultPGP), @@ -129,4 +128,5 @@ func TestStandaloneUpgradeWithGPGFallbackOneRemoteFailing(t *testing.T) { upgradetest.WithSourceURI(""), upgradetest.WithCustomPGP(customPGP), upgradetest.WithSkipVerify(false)) + require.NoError(t, err, "perform upgrade failed") } diff --git a/testing/pgptest/pgp.go b/testing/pgptest/pgp.go new file mode 100644 index 00000000000..c6c441536bf --- /dev/null +++ b/testing/pgptest/pgp.go @@ -0,0 +1,42 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package pgptest + +import ( + "bytes" + "io" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/crypto/openpgp" //nolint:staticcheck // It still receives security fixes and it's just test code + "golang.org/x/crypto/openpgp/armor" //nolint:staticcheck // It still receives security fixes and it's just test code +) + +// Sing signs data using RSA. It creates the key, sings data and returns the +// ASCII armored public key and detached signature. +func Sing(t *testing.T, data io.Reader) ([]byte, []byte) { + pub := &bytes.Buffer{} + asc := &bytes.Buffer{} + + // Create a new key. The openpgp.Entity hold the private and public keys. + entity, err := openpgp.NewEntity("somekey", "", "", nil) + require.NoError(t, err, "could not create PGP key") + + // Create an encoder to serialize the public key. + wPubKey, err := armor.Encode(pub, openpgp.PublicKeyType, nil) + require.NoError(t, err, "could not create PGP ASCII Armor encoder") + + // Writes the public key to the io.Writer padded to armor.Encode. + // Use entity.SerializePrivate if you need the private key. 
+ err = entity.Serialize(wPubKey) + require.NoError(t, err, "could not serialize the public key") + // cannot use defer as it needs to be closed before pub.Bytes() is invoked. + wPubKey.Close() + + err = openpgp.ArmoredDetachSign(asc, entity, data, nil) + require.NoError(t, err, "failed signing the data") + + return pub.Bytes(), asc.Bytes() +} From 92acf08344bd8d91eaf4de7dfce34527517307c1 Mon Sep 17 00:00:00 2001 From: Shaunak Kashyap Date: Thu, 19 Oct 2023 16:28:23 -0700 Subject: [PATCH 11/15] Track upgrade details (#3527) * Remove context and handle cancellation internally instead * More optimizations * Add back context * Adding FSM for upgrades * Implementing TODO * WIP * WIP * Reorganizing imports * Running go mod tidy * Resolve deadlock * Add unit tests * Fix type * Renaming variable to avoid conflict with package name * Handle failures in one place * Set UPG_RESTARTING state * Remove Fleet changes * Add guard for action * Immediately notify observer when registered * Add UpgradeCompleted effect to observer doc * Fix initialization * Adding details progress observer and unit tests * Fixing booboos introduced during conflict resolution * Add unit test * Add assertion on error * Add comment on stateNeedsRefresh * Add comment linking to Fleet Server OpenAPI spec for UPG_* values * Use public accessor for setting upgrade details on coordinator to prevent data race * Use buffered channel for upgradeDetailsChan in test so test can run in single goroutine * Fixing unit test * Add mutex to prevent data race * Clarify assertion's intent * Make copy of details before notifying observer with it. * Add setter for setting download percent * Remove unnecessary struct tags * Change mutex type * Document FailedState and ErrorMsg fields * Track download rate as well * Change data type of time field * Rename struct to avoid stutter in naming * Log upgrade details when they change * Add nil guard * Setting logger in test * Use sentinel value for encoding +Inf download rate in JSON * Fix up comment * Set omitempty on failed_state and error_msg * Add units to download rate --- .../handlers/handler_action_upgrade_test.go | 3 +- .../application/coordinator/coordinator.go | 42 ++++- .../coordinator/coordinator_state.go | 21 ++- .../coordinator/coordinator_test.go | 61 ++++++- .../coordinator/coordinator_unit_test.go | 12 +- .../artifact/download/http/downloader.go | 24 +-- .../artifact/download/http/downloader_test.go | 13 +- .../download/http/progress_observer.go | 35 ++++ .../download/http/progress_observer_test.go | 36 ++++ .../artifact/download/http/verifier_test.go | 4 +- .../download/localremote/downloader.go | 7 +- .../artifact/download/snapshot/downloader.go | 5 +- .../application/upgrade/details/details.go | 166 ++++++++++++++++++ .../upgrade/details/details_test.go | 95 ++++++++++ .../application/upgrade/details/state.go | 22 +++ .../application/upgrade/step_download.go | 19 +- .../application/upgrade/step_download_test.go | 21 ++- .../pkg/agent/application/upgrade/upgrade.go | 15 +- 18 files changed, 547 insertions(+), 54 deletions(-) create mode 100644 internal/pkg/agent/application/upgrade/artifact/download/http/progress_observer_test.go create mode 100644 internal/pkg/agent/application/upgrade/details/details.go create mode 100644 internal/pkg/agent/application/upgrade/details/details_test.go create mode 100644 internal/pkg/agent/application/upgrade/details/state.go diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_upgrade_test.go 
b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade_test.go index 17de63af699..b917b00b900 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_upgrade_test.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade_test.go @@ -14,6 +14,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" @@ -35,7 +36,7 @@ func (u *mockUpgradeManager) Reload(rawConfig *config.Config) error { return nil } -func (u *mockUpgradeManager) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) { +func (u *mockUpgradeManager) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, details *details.Details, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) { select { case <-time.After(2 * time.Second): u.msgChan <- "completed " + version diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index a17d3bf6199..0da3546ffa8 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -17,8 +17,10 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/capabilities" @@ -59,7 +61,7 @@ type UpgradeManager interface { Reload(rawConfig *config.Config) error // Upgrade upgrades running agent. - Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) + Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, details *details.Details, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) // Ack is used on startup to check if the agent has upgraded and needs to send an ack for the action Ack(ctx context.Context, acker acker.Acker) error @@ -192,8 +194,12 @@ type Coordinator struct { // state should never be directly read or written outside the Coordinator // goroutine. Callers who need to access or modify the state should use the // public accessors like State(), SetLogLevel(), etc. 
- state State - stateBroadcaster *broadcaster.Broadcaster[State] + state State + stateBroadcaster *broadcaster.Broadcaster[State] + + // If you get a race detector error while accessing this field, it probably + // means you're calling private Coordinator methods from outside the + // Coordinator goroutine. stateNeedsRefresh bool // overrideState is used during the update process to report the overall @@ -204,6 +210,10 @@ type Coordinator struct { // SetOverrideState helper to the Coordinator goroutine. overrideStateChan chan *coordinatorOverrideState + // upgradeDetailsChan forwards upgrade details from the publicly accessible + // SetUpgradeDetails helper to the Coordinator goroutine. + upgradeDetailsChan chan *details.Details + // loglevelCh forwards log level changes from the public API (SetLogLevel) // to the run loop in Coordinator's main goroutine. logLevelCh chan logp.Level @@ -326,8 +336,9 @@ func New(logger *logger.Logger, cfg *configuration.Configuration, logLevel logp. // synchronization in the subscriber API, just set the input buffer to 0. stateBroadcaster: broadcaster.New(state, 64, 32), - logLevelCh: make(chan logp.Level), - overrideStateChan: make(chan *coordinatorOverrideState), + logLevelCh: make(chan logp.Level), + overrideStateChan: make(chan *coordinatorOverrideState), + upgradeDetailsChan: make(chan *details.Details), } // Setup communication channels for any non-nil components. This pattern // lets us transparently accept nil managers / simulated events during @@ -445,17 +456,33 @@ func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI str // override the overall state to upgrading until the re-execution is complete c.SetOverrideState(agentclient.Upgrading, fmt.Sprintf("Upgrading to version %s", version)) - cb, err := c.upgradeMgr.Upgrade(ctx, version, sourceURI, action, skipVerifyOverride, skipDefaultPgp, pgpBytes...) + + // initialize upgrade details + actionID := "" + if action != nil { + actionID = action.ActionID + } + det := details.NewDetails(version, details.StateRequested, actionID) + det.RegisterObserver(c.SetUpgradeDetails) + det.RegisterObserver(c.logUpgradeDetails) + + cb, err := c.upgradeMgr.Upgrade(ctx, version, sourceURI, action, det, skipVerifyOverride, skipDefaultPgp, pgpBytes...) if err != nil { c.ClearOverrideState() + det.Fail(err) return err } if cb != nil { + det.SetState(details.StateRestarting) c.ReExec(cb) } return nil } +func (c *Coordinator) logUpgradeDetails(details *details.Details) { + c.logger.Infow("updated upgrade details", "upgrade_details", details) +} + // AckUpgrade is the method used on startup to ack a previously successful upgrade action. // Called from external goroutines. 
func (c *Coordinator) AckUpgrade(ctx context.Context, acker acker.Acker) error { @@ -878,6 +905,9 @@ func (c *Coordinator) runLoopIteration(ctx context.Context) { case overrideState := <-c.overrideStateChan: c.setOverrideState(overrideState) + case upgradeDetails := <-c.upgradeDetailsChan: + c.setUpgradeDetails(upgradeDetails) + case componentState := <-c.managerChans.runtimeManagerUpdate: // New component change reported by the runtime manager via // Coordinator.watchRuntimeComponents(), merge it with the diff --git a/internal/pkg/agent/application/coordinator/coordinator_state.go b/internal/pkg/agent/application/coordinator/coordinator_state.go index f896f024733..6e645c3a06b 100644 --- a/internal/pkg/agent/application/coordinator/coordinator_state.go +++ b/internal/pkg/agent/application/coordinator/coordinator_state.go @@ -7,11 +7,13 @@ package coordinator import ( "fmt" - agentclient "github.com/elastic/elastic-agent/pkg/control/v2/client" - "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-libs/logp" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/pkg/component/runtime" + agentclient "github.com/elastic/elastic-agent/pkg/control/v2/client" ) // State provides the current state of the coordinator along with all the current states of components and units. @@ -30,6 +32,8 @@ type State struct { Components []runtime.ComponentComponentState `yaml:"components"` LogLevel logp.Level `yaml:"log_level"` + + UpgradeDetails *details.Details `yaml:"upgrade_details,omitempty"` } type coordinatorOverrideState struct { @@ -54,6 +58,11 @@ func (c *Coordinator) ClearOverrideState() { c.overrideStateChan <- nil } +// SetUpgradeDetails sets upgrade details. This is used during upgrades. +func (c *Coordinator) SetUpgradeDetails(upgradeDetails *details.Details) { + c.upgradeDetailsChan <- upgradeDetails +} + // setRuntimeManagerError updates the error state for the runtime manager. // Called on the main Coordinator goroutine. func (c *Coordinator) setRuntimeManagerError(err error) { @@ -114,6 +123,13 @@ func (c *Coordinator) setOverrideState(overrideState *coordinatorOverrideState) c.stateNeedsRefresh = true } +// setUpgradeDetails is the internal helper to set upgrade details and set stateNeedsRefresh. +// Must be called on the main Coordinator goroutine. +func (c *Coordinator) setUpgradeDetails(upgradeDetails *details.Details) { + c.state.UpgradeDetails = upgradeDetails + c.stateNeedsRefresh = true +} + // Forward the current state to the broadcaster and clear the stateNeedsRefresh // flag. Must be called on the main Coordinator goroutine. 
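// The new upgrade-details path feeds into this broadcast as well. A rough sketch of
// the flow, using only the pieces added in this patch (any goroutine on the left,
// the Coordinator goroutine on the right):
//
//	det.RegisterObserver(c.SetUpgradeDetails)         // observer invokes the public helper
//	c.SetUpgradeDetails(d)  --> c.upgradeDetailsChan  // plain channel send, safe from any goroutine
//	runLoopIteration:           case d := <-c.upgradeDetailsChan: c.setUpgradeDetails(d)
//	setUpgradeDetails:          c.state.UpgradeDetails = d; c.stateNeedsRefresh = true
//	refreshState:               broadcasts the State, including UpgradeDetails, to subscribers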
func (c *Coordinator) refreshState() { @@ -163,6 +179,7 @@ func (c *Coordinator) generateReportableState() (s State) { s.FleetState = c.state.FleetState s.FleetMessage = c.state.FleetMessage s.LogLevel = c.state.LogLevel + s.UpgradeDetails = c.state.UpgradeDetails s.Components = make([]runtime.ComponentComponentState, len(c.state.Components)) copy(s.Components, c.state.Components) diff --git a/internal/pkg/agent/application/coordinator/coordinator_test.go b/internal/pkg/agent/application/coordinator/coordinator_test.go index e84b43f182c..131b91b447a 100644 --- a/internal/pkg/agent/application/coordinator/coordinator_test.go +++ b/internal/pkg/agent/application/coordinator/coordinator_test.go @@ -15,6 +15,8 @@ import ( "testing" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" + "github.com/stretchr/testify/assert" "github.com/elastic/elastic-agent-client/v7/pkg/client" @@ -471,8 +473,50 @@ func TestCoordinator_Upgrade(t *testing.T) { require.NoError(t, err) } +func TestCoordinator_UpgradeDetails(t *testing.T) { + coordCh := make(chan error) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + expectedErr := errors.New("some upgrade error") + upgradeManager := &fakeUpgradeManager{ + upgradeable: true, + upgradeErr: expectedErr, + } + coord, cfgMgr, varsMgr := createCoordinator(t, ctx, WithUpgradeManager(upgradeManager)) + require.Nil(t, coord.state.UpgradeDetails) + go func() { + err := coord.Run(ctx) + if errors.Is(err, context.Canceled) { + // allowed error + err = nil + } + coordCh <- err + }() + + // no vars used by the config + varsMgr.Vars(ctx, []*transpiler.Vars{{}}) + + // no need for anything to really run + cfg, err := config.NewConfigFrom(nil) + require.NoError(t, err) + cfgMgr.Config(ctx, cfg) + + err = coord.Upgrade(ctx, "9.0.0", "", nil, true, false) + require.ErrorIs(t, expectedErr, err) + cancel() + + err = <-coordCh + require.NoError(t, err) + + require.Equal(t, details.StateFailed, coord.state.UpgradeDetails.State) + require.Equal(t, details.StateRequested, coord.state.UpgradeDetails.Metadata.FailedState) + require.Equal(t, expectedErr.Error(), coord.state.UpgradeDetails.Metadata.ErrorMsg) +} + type createCoordinatorOpts struct { - managed bool + managed bool + upgradeManager UpgradeManager } type CoordinatorOpt func(o *createCoordinatorOpts) @@ -483,6 +527,12 @@ func ManagedCoordinator(managed bool) CoordinatorOpt { } } +func WithUpgradeManager(upgradeManager UpgradeManager) CoordinatorOpt { + return func(o *createCoordinatorOpts) { + o.upgradeManager = upgradeManager + } +} + // createCoordinator creates a coordinator that using a fake config manager and a fake vars manager. // // The runtime specifications is set up to use both the fake component and fake shipper. 
@@ -527,7 +577,12 @@ func createCoordinator(t *testing.T, ctx context.Context, opts ...CoordinatorOpt cfgMgr := newFakeConfigManager() varsMgr := newFakeVarsManager() - coord := New(l, nil, logp.DebugLevel, ai, specs, &fakeReExecManager{}, &fakeUpgradeManager{}, rm, cfgMgr, varsMgr, caps, monitoringMgr, o.managed) + upgradeManager := o.upgradeManager + if upgradeManager == nil { + upgradeManager = &fakeUpgradeManager{} + } + + coord := New(l, nil, logp.DebugLevel, ai, specs, &fakeReExecManager{}, upgradeManager, rm, cfgMgr, varsMgr, caps, monitoringMgr, o.managed) return coord, cfgMgr, varsMgr } @@ -574,7 +629,7 @@ func (f *fakeUpgradeManager) Reload(cfg *config.Config) error { return nil } -func (f *fakeUpgradeManager) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) { +func (f *fakeUpgradeManager) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, details *details.Details, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) { f.upgradeCalled = true if f.upgradeErr != nil { return nil, f.upgradeErr diff --git a/internal/pkg/agent/application/coordinator/coordinator_unit_test.go b/internal/pkg/agent/application/coordinator/coordinator_unit_test.go index 805139f26e8..5752c100a41 100644 --- a/internal/pkg/agent/application/coordinator/coordinator_unit_test.go +++ b/internal/pkg/agent/application/coordinator/coordinator_unit_test.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/pkg/component" @@ -811,6 +812,9 @@ func TestCoordinatorInitiatesUpgrade(t *testing.T) { // since a successful upgrade sets the override state twice. overrideStateChan := make(chan *coordinatorOverrideState, 2) + // similarly, upgradeDetailsChan is a buffered channel as well. + upgradeDetailsChan := make(chan *details.Details, 2) + // Create a manager that will allow upgrade attempts but return a failure // from Upgrade itself (success requires testing ReExec and we aren't // quite ready to do that yet). 
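// A minimal sketch (assuming the same package and the fields declared in this patch)
// of why both channels are buffered with capacity 2 in this single-goroutine test:
// a failed upgrade produces exactly two notifications per channel (the initial details
// delivered on RegisterObserver plus the Fail update, and SetOverrideState followed by
// ClearOverrideState), and with no run loop draining them the sends must not block.
//
//	c := &Coordinator{
//		overrideStateChan:  make(chan *coordinatorOverrideState, 2),
//		upgradeDetailsChan: make(chan *details.Details, 2),
//	}
//	c.SetUpgradeDetails(details.NewDetails("8.12.0", details.StateRequested, "")) // does not block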
@@ -820,9 +824,11 @@ func TestCoordinatorInitiatesUpgrade(t *testing.T) { } coord := &Coordinator{ - stateBroadcaster: broadcaster.New(State{}, 0, 0), - overrideStateChan: overrideStateChan, - upgradeMgr: upgradeMgr, + stateBroadcaster: broadcaster.New(State{}, 0, 0), + overrideStateChan: overrideStateChan, + upgradeDetailsChan: upgradeDetailsChan, + upgradeMgr: upgradeMgr, + logger: logp.NewLogger("testing"), } // Call upgrade and make sure the upgrade manager receives an Upgrade call diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go index a02585e5938..50fc6849f21 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go @@ -20,6 +20,7 @@ import ( "github.com/elastic/elastic-agent-libs/transport/httpcommon" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -43,13 +44,14 @@ const ( // Downloader is a downloader able to fetch artifacts from elastic.co web page. type Downloader struct { - log *logger.Logger - config *artifact.Config - client http.Client + log *logger.Logger + config *artifact.Config + client http.Client + upgradeDetails *details.Details } // NewDownloader creates and configures Elastic Downloader -func NewDownloader(log *logger.Logger, config *artifact.Config) (*Downloader, error) { +func NewDownloader(log *logger.Logger, config *artifact.Config, upgradeDetails *details.Details) (*Downloader, error) { client, err := config.HTTPTransportSettings.Client( httpcommon.WithAPMHTTPInstrumentation(), httpcommon.WithKeepaliveSettings{Disable: false, IdleConnTimeout: 30 * time.Second}, @@ -59,15 +61,16 @@ func NewDownloader(log *logger.Logger, config *artifact.Config) (*Downloader, er } client.Transport = download.WithHeaders(client.Transport, download.Headers) - return NewDownloaderWithClient(log, config, *client), nil + return NewDownloaderWithClient(log, config, *client, upgradeDetails), nil } // NewDownloaderWithClient creates Elastic Downloader with specific client used -func NewDownloaderWithClient(log *logger.Logger, config *artifact.Config, client http.Client) *Downloader { +func NewDownloaderWithClient(log *logger.Logger, config *artifact.Config, client http.Client, upgradeDetails *details.Details) *Downloader { return &Downloader{ - log: log, - config: config, - client: client, + log: log, + config: config, + client: client, + upgradeDetails: upgradeDetails, } } @@ -206,7 +209,8 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f } loggingObserver := newLoggingProgressObserver(e.log, e.config.HTTPTransportSettings.Timeout) - dp := newDownloadProgressReporter(sourceURI, e.config.HTTPTransportSettings.Timeout, fileSize, loggingObserver) + detailsObserver := newDetailsProgressObserver(e.upgradeDetails) + dp := newDownloadProgressReporter(sourceURI, e.config.HTTPTransportSettings.Timeout, fileSize, loggingObserver, detailsObserver) dp.Report(ctx) _, err = io.Copy(destinationFile, io.TeeReader(resp.Body, dp)) if err != nil { diff --git 
a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go index 4c341a2aa5e..94e3ce856e2 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go @@ -21,6 +21,7 @@ import ( "go.uber.org/zap/zaptest/observer" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/docker/go-units" @@ -56,7 +57,8 @@ func TestDownload(t *testing.T) { config.OperatingSystem = testCase.system config.Architecture = testCase.arch - testClient := NewDownloaderWithClient(log, config, elasticClient) + upgradeDetails := details.NewDetails("8.12.0", details.StateRequested, "") + testClient := NewDownloaderWithClient(log, config, elasticClient, upgradeDetails) artifactPath, err := testClient.Download(context.Background(), beatSpec, version) if err != nil { t.Fatal(err) @@ -105,7 +107,8 @@ func TestDownloadBodyError(t *testing.T) { } log, obs := logger.NewTesting("downloader") - testClient := NewDownloaderWithClient(log, config, *client) + upgradeDetails := details.NewDetails("8.12.0", details.StateRequested, "") + testClient := NewDownloaderWithClient(log, config, *client, upgradeDetails) artifactPath, err := testClient.Download(context.Background(), beatSpec, version) os.Remove(artifactPath) if err == nil { @@ -161,7 +164,8 @@ func TestDownloadLogProgressWithLength(t *testing.T) { } log, obs := logger.NewTesting("downloader") - testClient := NewDownloaderWithClient(log, config, *client) + upgradeDetails := details.NewDetails("8.12.0", details.StateRequested, "") + testClient := NewDownloaderWithClient(log, config, *client, upgradeDetails) artifactPath, err := testClient.Download(context.Background(), beatSpec, version) os.Remove(artifactPath) require.NoError(t, err, "Download should not have errored") @@ -243,7 +247,8 @@ func TestDownloadLogProgressWithoutLength(t *testing.T) { } log, obs := logger.NewTesting("downloader") - testClient := NewDownloaderWithClient(log, config, *client) + upgradeDetails := details.NewDetails("8.12.0", details.StateRequested, "") + testClient := NewDownloaderWithClient(log, config, *client, upgradeDetails) artifactPath, err := testClient.Download(context.Background(), beatSpec, version) os.Remove(artifactPath) require.NoError(t, err, "Download should not have errored") diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/progress_observer.go b/internal/pkg/agent/application/upgrade/artifact/download/http/progress_observer.go index 4eef0682a50..ca024c53c88 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/progress_observer.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/progress_observer.go @@ -5,10 +5,12 @@ package http import ( + "sync" "time" "github.com/docker/go-units" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -95,3 +97,36 @@ func (lpObs *loggingProgressObserver) ReportFailed(sourceURI string, timePast ti lpObs.log.Warnf(msg, args...) 
} } + +type detailsProgressObserver struct { + upgradeDetails *details.Details + mu sync.RWMutex +} + +func newDetailsProgressObserver(upgradeDetails *details.Details) *detailsProgressObserver { + upgradeDetails.SetState(details.StateDownloading) + return &detailsProgressObserver{ + upgradeDetails: upgradeDetails, + } +} + +func (dpObs *detailsProgressObserver) Report(sourceURI string, timePast time.Duration, downloadedBytes, totalBytes, percentComplete, downloadRateBytesPerSecond float64) { + dpObs.mu.Lock() + defer dpObs.mu.Unlock() + + dpObs.upgradeDetails.SetDownloadProgress(percentComplete, downloadRateBytesPerSecond) +} + +func (dpObs *detailsProgressObserver) ReportCompleted(sourceURI string, timePast time.Duration, downloadRateBytesPerSecond float64) { + dpObs.mu.Lock() + defer dpObs.mu.Unlock() + + dpObs.upgradeDetails.SetDownloadProgress(1, downloadRateBytesPerSecond) +} + +func (dpObs *detailsProgressObserver) ReportFailed(sourceURI string, timePast time.Duration, downloadedBytes, totalBytes, percentComplete, downloadRateBytesPerSecond float64, err error) { + dpObs.mu.Lock() + defer dpObs.mu.Unlock() + + dpObs.upgradeDetails.Fail(err) +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/progress_observer_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/progress_observer_test.go new file mode 100644 index 00000000000..bb1d7ac1c87 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/progress_observer_test.go @@ -0,0 +1,36 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package http + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/docker/go-units" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" +) + +func TestDetailsProgressObserver(t *testing.T) { + upgradeDetails := details.NewDetails("8.11.0", details.StateRequested, "") + detailsObs := newDetailsProgressObserver(upgradeDetails) + + detailsObs.Report("http://some/uri", 20*time.Second, 400*units.MiB, 500*units.MiB, 0.8, 4455) + require.Equal(t, details.StateDownloading, upgradeDetails.State) + require.Equal(t, 0.8, upgradeDetails.Metadata.DownloadPercent) + + detailsObs.ReportCompleted("http://some/uri", 30*time.Second, 3333) + require.Equal(t, details.StateDownloading, upgradeDetails.State) + require.Equal(t, 1.0, upgradeDetails.Metadata.DownloadPercent) + + err := errors.New("some download error") + detailsObs.ReportFailed("http://some/uri", 30*time.Second, 450*units.MiB, 500*units.MiB, 0.9, 1122, err) + require.Equal(t, details.StateFailed, upgradeDetails.State) + require.Equal(t, details.StateDownloading, upgradeDetails.Metadata.FailedState) + require.Equal(t, err.Error(), upgradeDetails.Metadata.ErrorMsg) +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go index c8f3405f404..66c8bd715e0 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/elastic-agent-libs/transport/httpcommon" 
"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -46,7 +47,8 @@ func TestVerify(t *testing.T) { config.OperatingSystem = testCase.system config.Architecture = testCase.arch - testClient := NewDownloaderWithClient(log, config, elasticClient) + upgradeDetails := details.NewDetails("8.12.0", details.StateRequested, "") + testClient := NewDownloaderWithClient(log, config, elasticClient, upgradeDetails) artifact, err := testClient.Download(context.Background(), beatSpec, version) if err != nil { t.Fatal(err) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go index 78cef03e578..023c15a5272 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go @@ -11,13 +11,14 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/snapshot" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. -func NewDownloader(log *logger.Logger, config *artifact.Config) (download.Downloader, error) { +func NewDownloader(log *logger.Logger, config *artifact.Config, upgradeDetails *details.Details) (download.Downloader, error) { downloaders := make([]download.Downloader, 0, 3) downloaders = append(downloaders, fs.NewDownloader(config)) @@ -26,7 +27,7 @@ func NewDownloader(log *logger.Logger, config *artifact.Config) (download.Downlo // a snapshot version of fleet, for example. 
// try snapshot repo before official if release.Snapshot() { - snapDownloader, err := snapshot.NewDownloader(log, config, nil) + snapDownloader, err := snapshot.NewDownloader(log, config, nil, upgradeDetails) if err != nil { log.Error(err) } else { @@ -34,7 +35,7 @@ func NewDownloader(log *logger.Logger, config *artifact.Config) (download.Downlo } } - httpDownloader, err := http.NewDownloader(log, config) + httpDownloader, err := http.NewDownloader(log, config, upgradeDetails) if err != nil { return nil, err } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go index 51b16ee4372..ecf2497851c 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go @@ -15,6 +15,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" agtversion "github.com/elastic/elastic-agent/pkg/version" @@ -32,13 +33,13 @@ type Downloader struct { // We need to pass the versionOverride separately from the config as // artifact.Config struct is part of agent configuration and a version // override makes no sense there -func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride *agtversion.ParsedSemVer) (download.Downloader, error) { +func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride *agtversion.ParsedSemVer, upgradeDetails *details.Details) (download.Downloader, error) { cfg, err := snapshotConfig(config, versionOverride) if err != nil { return nil, fmt.Errorf("error creating snapshot config: %w", err) } - httpDownloader, err := http.NewDownloader(log, cfg) + httpDownloader, err := http.NewDownloader(log, cfg, upgradeDetails) if err != nil { return nil, fmt.Errorf("failed to create snapshot downloader: %w", err) } diff --git a/internal/pkg/agent/application/upgrade/details/details.go b/internal/pkg/agent/application/upgrade/details/details.go new file mode 100644 index 00000000000..028990aafcd --- /dev/null +++ b/internal/pkg/agent/application/upgrade/details/details.go @@ -0,0 +1,166 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package details + +import ( + "encoding/json" + "fmt" + "math" + "strings" + "sync" + "time" + + "github.com/docker/go-units" +) + +// downloadRate is a float64 that can be safely marshalled to JSON +// when the value is Infinity. The rate is always in bytes/second units. +type downloadRate float64 + +// Observer is a function that will be called with upgrade details +type Observer func(details *Details) + +// Details consists of details regarding an ongoing upgrade. 
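// Illustrative of the wire shape only (all values assumed): while a download is in
// progress, a Details value serializes, via the struct tags and the downloadRate
// marshaller defined in this file, to roughly
//
//	{
//	  "target_version": "8.12.0",
//	  "state": "UPG_DOWNLOADING",
//	  "action_id": "fleet-action-id",
//	  "metadata": {"download_percent": 0.8, "download_rate": "1.8kBps"}
//	}
//
// action_id and the failure fields are omitted when empty, and an infinite download
// rate is encoded with the sentinel string "+Inf bps".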
+type Details struct { + TargetVersion string `json:"target_version"` + State State `json:"state"` + ActionID string `json:"action_id,omitempty"` + Metadata Metadata `json:"metadata"` + + observers []Observer + mu sync.Mutex +} + +// Metadata consists of metadata relating to a specific upgrade state +type Metadata struct { + ScheduledAt time.Time `json:"scheduled_at,omitempty"` + DownloadPercent float64 `json:"download_percent,omitempty"` + DownloadRate downloadRate `json:"download_rate,omitempty"` + + // FailedState is the state an upgrade was in if/when it failed. Use the + // Fail() method of UpgradeDetails to correctly record details when + // an upgrade fails. + FailedState State `json:"failed_state,omitempty"` + + // ErrorMsg is any error message encountered if/when an upgrade fails. Use + // the Fail() method of UpgradeDetails to correctly record details when + // an upgrade fails. + ErrorMsg string `json:"error_msg,omitempty"` +} + +func NewDetails(targetVersion string, initialState State, actionID string) *Details { + return &Details{ + TargetVersion: targetVersion, + State: initialState, + ActionID: actionID, + Metadata: Metadata{}, + observers: []Observer{}, + } +} + +// SetState is a convenience method to set the state of the upgrade and +// notify all observers. +func (d *Details) SetState(s State) { + d.mu.Lock() + defer d.mu.Unlock() + + d.State = s + d.notifyObservers() +} + +// SetDownloadProgress is a convenience method to set the download percent +// when the upgrade is in UPG_DOWNLOADING state. +func (d *Details) SetDownloadProgress(percent, rateBytesPerSecond float64) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Metadata.DownloadPercent = percent + d.Metadata.DownloadRate = downloadRate(rateBytesPerSecond) + d.notifyObservers() +} + +// Fail is a convenience method to set the state of the upgrade +// to StateFailed, set metadata associated with the failure, and +// notify all observers. +func (d *Details) Fail(err error) { + d.mu.Lock() + defer d.mu.Unlock() + + // Record the state the upgrade process was in right before it + // failed, but only do this if we haven't already transitioned the + // state to the StateFailed state; otherwise we'll just end up recording + // the state we failed from as StateFailed which is not useful. + if d.State != StateFailed { + d.Metadata.FailedState = d.State + } + + d.Metadata.ErrorMsg = err.Error() + d.State = StateFailed + d.notifyObservers() +} + +// RegisterObserver allows an interested consumer of Details to register +// themselves as an Observer. The registered observer is immediately notified +// of the current upgrade details. 
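// A short usage sketch (assumed caller code, not part of this file): observers are
// notified immediately on registration, receive a defensive copy on every change,
// and are handed nil once the upgrade reaches StateCompleted.
//
//	det := NewDetails("8.12.0", StateRequested, "action-id")
//	det.RegisterObserver(func(d *Details) {
//		if d == nil { // upgrade completed, details cleared
//			return
//		}
//		fmt.Printf("upgrade %s: %.0f%%\n", d.State, d.Metadata.DownloadPercent*100)
//	})
//	det.SetState(StateDownloading)
//	det.SetDownloadProgress(0.42, 1_500_000) // 42% done at roughly 1.5MBps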
+func (d *Details) RegisterObserver(observer Observer) { + d.mu.Lock() + defer d.mu.Unlock() + + d.observers = append(d.observers, observer) + d.notifyObserver(observer) +} + +func (d *Details) notifyObservers() { + for _, observer := range d.observers { + d.notifyObserver(observer) + } +} + +func (d *Details) notifyObserver(observer Observer) { + if d.State == StateCompleted { + observer(nil) + } else { + dCopy := Details{ + TargetVersion: d.TargetVersion, + State: d.State, + ActionID: d.ActionID, + Metadata: d.Metadata, + } + observer(&dCopy) + } +} + +func (dr *downloadRate) MarshalJSON() ([]byte, error) { + downloadRateBytesPerSecond := float64(*dr) + if math.IsInf(downloadRateBytesPerSecond, 0) { + return json.Marshal("+Inf bps") + } + + return json.Marshal( + fmt.Sprintf("%sps", units.HumanSizeWithPrecision(downloadRateBytesPerSecond, 2)), + ) +} + +func (dr *downloadRate) UnmarshalJSON(data []byte) error { + var downloadRateStr string + err := json.Unmarshal(data, &downloadRateStr) + if err != nil { + return err + } + + if downloadRateStr == "+Inf bps" { + *dr = downloadRate(math.Inf(1)) + return nil + } + + downloadRateStr = strings.TrimSuffix(downloadRateStr, "ps") + downloadRateBytesPerSecond, err := units.FromHumanSize(downloadRateStr) + if err != nil { + return err + } + + *dr = downloadRate(downloadRateBytesPerSecond) + return nil +} diff --git a/internal/pkg/agent/application/upgrade/details/details_test.go b/internal/pkg/agent/application/upgrade/details/details_test.go new file mode 100644 index 00000000000..88d239dfabf --- /dev/null +++ b/internal/pkg/agent/application/upgrade/details/details_test.go @@ -0,0 +1,95 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package details + +import ( + "encoding/json" + "errors" + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDetailsNew(t *testing.T) { + det := NewDetails("99.999.9999", StateRequested, "test_action_id") + require.Equal(t, StateRequested, det.State) + require.Equal(t, "99.999.9999", det.TargetVersion) + require.Equal(t, "test_action_id", det.ActionID) + require.Equal(t, Metadata{}, det.Metadata) +} + +func TestDetailsSetState(t *testing.T) { + det := NewDetails("99.999.9999", StateRequested, "test_action_id") + require.Equal(t, StateRequested, det.State) + + det.SetState(StateDownloading) + require.Equal(t, StateDownloading, det.State) +} + +func TestDetailsFail(t *testing.T) { + det := NewDetails("99.999.9999", StateRequested, "test_action_id") + require.Equal(t, StateRequested, det.State) + + err := errors.New("test error") + det.Fail(err) + require.Equal(t, StateFailed, det.State) + require.Equal(t, StateRequested, det.Metadata.FailedState) + require.Equal(t, err.Error(), det.Metadata.ErrorMsg) +} + +func TestDetailsObserver(t *testing.T) { + det := NewDetails("99.999.9999", StateRequested, "test_action_id") + require.Equal(t, StateRequested, det.State) + + var observedDetails *Details + obs := func(updatedDetails *Details) { observedDetails = updatedDetails } + + det.RegisterObserver(obs) + require.Len(t, det.observers, 1) + require.NotNil(t, observedDetails) + require.Equal(t, StateRequested, observedDetails.State) + + det.SetState(StateDownloading) + require.Equal(t, StateDownloading, det.State) + require.Equal(t, StateDownloading, observedDetails.State) + + det.SetState(StateCompleted) + require.Equal(t, StateCompleted, det.State) + require.Nil(t, nil, observedDetails) +} + +func TestDetailsDownloadRateJSON(t *testing.T) { + det := NewDetails("99.999.9999", StateRequested, "test_action_id") + + // Normal (non-infinity) download rate + t.Run("non_infinity", func(t *testing.T) { + det.SetDownloadProgress(.8, 1794.7) + + data, err := json.Marshal(det) + require.NoError(t, err) + + var unmarshalledDetails Details + err = json.Unmarshal(data, &unmarshalledDetails) + require.NoError(t, err) + require.Equal(t, float64(1800), float64(unmarshalledDetails.Metadata.DownloadRate)) + require.Equal(t, .8, unmarshalledDetails.Metadata.DownloadPercent) + }) + + // Infinity download rate + t.Run("infinity", func(t *testing.T) { + det.SetDownloadProgress(0.99, math.Inf(1)) + + data, err := json.Marshal(det) + require.NoError(t, err) + + var unmarshalledDetails Details + err = json.Unmarshal(data, &unmarshalledDetails) + require.NoError(t, err) + require.Equal(t, math.Inf(1), float64(unmarshalledDetails.Metadata.DownloadRate)) + require.Equal(t, 0.99, unmarshalledDetails.Metadata.DownloadPercent) + }) + +} diff --git a/internal/pkg/agent/application/upgrade/details/state.go b/internal/pkg/agent/application/upgrade/details/state.go new file mode 100644 index 00000000000..19aaaae8a25 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/details/state.go @@ -0,0 +1,22 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package details + +type State string + +// The values of these State* constants should match those enumerated for +// upgrade_details.state in https://github.com/elastic/fleet-server/blob/main/model/openapi.yml +const ( + StateRequested State = "UPG_REQUESTED" + StateScheduled State = "UPG_SCHEDULED" + StateDownloading State = "UPG_DOWNLOADING" + StateExtracting State = "UPG_EXTRACTING" + StateReplacing State = "UPG_REPLACING" + StateRestarting State = "UPG_RESTARTING" + StateWatching State = "UPG_WATCHING" + StateRollback State = "UPG_ROLLBACK" + StateCompleted State = "UPG_COMPLETED" + StateFailed State = "UPG_FAILED" +) diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index cee0c1d75dc..d86a43a5a3b 100644 --- a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -12,6 +12,8 @@ import ( "strings" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" + "github.com/cenkalti/backoff/v4" "go.elastic.co/apm" @@ -35,7 +37,7 @@ const ( fleetUpgradeFallbackPGPFormat = "/api/agents/upgrades/%d.%d.%d/pgp-public-key" ) -func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI string, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ string, err error) { +func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI string, upgradeDetails *details.Details, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ string, err error) { span, ctx := apm.StartSpan(ctx, "downloadArtifact", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() @@ -69,7 +71,7 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri return "", errors.New(err, fmt.Sprintf("failed to create download directory at %s", paths.Downloads())) } - path, err := u.downloadWithRetries(ctx, newDownloader, parsedVersion, &settings) + path, err := u.downloadWithRetries(ctx, newDownloader, parsedVersion, &settings, upgradeDetails) if err != nil { return "", errors.New(err, "failed download of agent binary") } @@ -121,20 +123,20 @@ func (u *Upgrader) appendFallbackPGP(targetVersion string, pgpBytes []string) [] return pgpBytes } -func newDownloader(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config) (download.Downloader, error) { +func newDownloader(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config, upgradeDetails *details.Details) (download.Downloader, error) { if !version.IsSnapshot() { - return localremote.NewDownloader(log, settings) + return localremote.NewDownloader(log, settings, upgradeDetails) } // TODO since we know if it's a snapshot or not, shouldn't we add EITHER the snapshot downloader OR the release one ? 
// try snapshot repo before official - snapDownloader, err := snapshot.NewDownloader(log, settings, version) + snapDownloader, err := snapshot.NewDownloader(log, settings, version, upgradeDetails) if err != nil { return nil, err } - httpDownloader, err := http.NewDownloader(log, settings) + httpDownloader, err := http.NewDownloader(log, settings, upgradeDetails) if err != nil { return nil, err } @@ -169,9 +171,10 @@ func newVerifier(version *agtversion.ParsedSemVer, log *logger.Logger, settings func (u *Upgrader) downloadWithRetries( ctx context.Context, - downloaderCtor func(*agtversion.ParsedSemVer, *logger.Logger, *artifact.Config) (download.Downloader, error), + downloaderCtor func(*agtversion.ParsedSemVer, *logger.Logger, *artifact.Config, *details.Details) (download.Downloader, error), version *agtversion.ParsedSemVer, settings *artifact.Config, + upgradeDetails *details.Details, ) (string, error) { cancelCtx, cancel := context.WithTimeout(ctx, settings.Timeout) defer cancel() @@ -187,7 +190,7 @@ func (u *Upgrader) downloadWithRetries( attempt++ u.log.Infof("download attempt %d", attempt) - downloader, err := downloaderCtor(version, u.log, settings) + downloader, err := downloaderCtor(version, u.log, settings, upgradeDetails) if err != nil { return fmt.Errorf("unable to create fetcher: %w", err) } diff --git a/internal/pkg/agent/application/upgrade/step_download_test.go b/internal/pkg/agent/application/upgrade/step_download_test.go index 330a60f5288..dcdc4da7de8 100644 --- a/internal/pkg/agent/application/upgrade/step_download_test.go +++ b/internal/pkg/agent/application/upgrade/step_download_test.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/pkg/core/logger" agtversion "github.com/elastic/elastic-agent/pkg/version" @@ -84,14 +85,15 @@ func TestDownloadWithRetries(t *testing.T) { // Successful immediately (no retries) t.Run("successful_immediately", func(t *testing.T) { - mockDownloaderCtor := func(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config) (download.Downloader, error) { + mockDownloaderCtor := func(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config, upgradeDetails *details.Details) (download.Downloader, error) { return &mockDownloader{expectedDownloadPath, nil}, nil } u := NewUpgrader(testLogger, &settings, &info.AgentInfo{}) parsedVersion, err := agtversion.ParseVersion("8.9.0") require.NoError(t, err) - path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings) + upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings, upgradeDetails) require.NoError(t, err) require.Equal(t, expectedDownloadPath, path) @@ -103,7 +105,7 @@ func TestDownloadWithRetries(t *testing.T) { // Downloader constructor failing on first attempt, but succeeding on second attempt (= first retry) t.Run("constructor_failure_once", func(t *testing.T) { attemptIdx := 0 - mockDownloaderCtor := func(version *agtversion.ParsedSemVer, log *logger.Logger, settings 
*artifact.Config) (download.Downloader, error) { + mockDownloaderCtor := func(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config, upgradeDetails *details.Details) (download.Downloader, error) { defer func() { attemptIdx++ }() @@ -125,7 +127,8 @@ func TestDownloadWithRetries(t *testing.T) { u := NewUpgrader(testLogger, &settings, &info.AgentInfo{}) parsedVersion, err := agtversion.ParseVersion("8.9.0") require.NoError(t, err) - path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings) + upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings, upgradeDetails) require.NoError(t, err) require.Equal(t, expectedDownloadPath, path) @@ -139,7 +142,7 @@ func TestDownloadWithRetries(t *testing.T) { // Download failing on first attempt, but succeeding on second attempt (= first retry) t.Run("download_failure_once", func(t *testing.T) { attemptIdx := 0 - mockDownloaderCtor := func(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config) (download.Downloader, error) { + mockDownloaderCtor := func(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config, upgradeDetails *details.Details) (download.Downloader, error) { defer func() { attemptIdx++ }() @@ -161,7 +164,8 @@ func TestDownloadWithRetries(t *testing.T) { u := NewUpgrader(testLogger, &settings, &info.AgentInfo{}) parsedVersion, err := agtversion.ParseVersion("8.9.0") require.NoError(t, err) - path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings) + upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings, upgradeDetails) require.NoError(t, err) require.Equal(t, expectedDownloadPath, path) @@ -178,14 +182,15 @@ func TestDownloadWithRetries(t *testing.T) { testCaseSettings.Timeout = 200 * time.Millisecond testCaseSettings.RetrySleepInitDuration = 100 * time.Millisecond - mockDownloaderCtor := func(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config) (download.Downloader, error) { + mockDownloaderCtor := func(version *agtversion.ParsedSemVer, log *logger.Logger, settings *artifact.Config, upgradeDetails *details.Details) (download.Downloader, error) { return &mockDownloader{"", errors.New("download failed")}, nil } u := NewUpgrader(testLogger, &settings, &info.AgentInfo{}) parsedVersion, err := agtversion.ParseVersion("8.9.0") require.NoError(t, err) - path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &testCaseSettings) + upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &testCaseSettings, upgradeDetails) require.Equal(t, "context deadline exceeded", err.Error()) require.Equal(t, "", path) diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index eaf51ef7684..36276f239b6 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" 
"github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -28,7 +29,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/elastic/elastic-agent/pkg/control/v2/client" "github.com/elastic/elastic-agent/pkg/control/v2/cproto" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -127,7 +127,7 @@ func (u *Upgrader) Upgradeable() bool { } // Upgrade upgrades running agent, function returns shutdown callback that must be called by reexec. -func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) { +func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, det *details.Details, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) { u.log.Infow("Upgrading agent", "version", version, "source_uri", sourceURI) span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() @@ -137,17 +137,22 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string u.log.Errorw("Unable to clean downloads before update", "error.message", err, "downloads.path", paths.Downloads()) } + det.SetState(details.StateDownloading) + sourceURI = u.sourceURI(sourceURI) - archivePath, err := u.downloadArtifact(ctx, version, sourceURI, skipVerifyOverride, skipDefaultPgp, pgpBytes...) + archivePath, err := u.downloadArtifact(ctx, version, sourceURI, det, skipVerifyOverride, skipDefaultPgp, pgpBytes...) if err != nil { // Run the same pre-upgrade cleanup task to get rid of any newly downloaded files // This may have an issue if users are upgrading to the same version number. 
if dErr := cleanNonMatchingVersionsFromDownloads(u.log, u.agentInfo.Version()); dErr != nil { u.log.Errorw("Unable to remove file after verification failure", "error.message", dErr) } + return nil, err } + det.SetState(details.StateExtracting) + newHash, err := u.unpack(version, archivePath) if err != nil { return nil, err @@ -170,6 +175,8 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string return nil, errors.New(err, "failed to copy run directory") } + det.SetState(details.StateReplacing) + if err := ChangeSymlink(ctx, u.log, newHash); err != nil { u.log.Errorw("Rolling back: changing symlink failed", "error.message", err) rollbackInstall(ctx, u.log, newHash) @@ -182,6 +189,8 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string return nil, err } + det.SetState(details.StateWatching) + if err := InvokeWatcher(u.log); err != nil { u.log.Errorw("Rolling back: starting watcher failed", "error.message", err) rollbackInstall(ctx, u.log, newHash) From 3c9380f66b4ace6e201dabe890cdf373aa15e69e Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Mon, 23 Oct 2023 09:21:48 +0200 Subject: [PATCH 12/15] Update go grpc version to 1.58.3 (#3635) --- NOTICE.txt | 38 +++++++++++++++++++------------------- go.mod | 18 +++++++++--------- go.sum | 34 ++++++++++++++++++---------------- 3 files changed, 46 insertions(+), 44 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 3290e34d2e5..2eb7dcd2da0 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -6173,11 +6173,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : golang.org/x/crypto -Version: v0.7.0 +Version: v0.11.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.7.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.11.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -6321,11 +6321,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.9.0 +Version: v0.10.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.9.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.10.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -6358,11 +6358,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/text -Version: v0.9.0 +Version: v0.11.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.9.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.11.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -6469,11 +6469,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : google.golang.org/grpc -Version: v1.53.0 +Version: v1.58.3 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.53.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.58.3/LICENSE: Apache License @@ -6681,11 +6681,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.53.0/LIC -------------------------------------------------------------------------------- Dependency : google.golang.org/protobuf -Version: v1.29.1 +Version: v1.31.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.29.1/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.31.0/LICENSE: Copyright (c) 2018 The Go Authors. All rights reserved. @@ -17494,11 +17494,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/net -Version: v0.9.0 +Version: v0.12.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.9.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.12.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -17531,11 +17531,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/oauth2 -Version: v0.4.0 +Version: v0.10.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.4.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.10.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -17568,11 +17568,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/term -Version: v0.7.0 +Version: v0.10.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.7.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.10.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. 
@@ -17816,12 +17816,12 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/appengine@v1.6.7 -------------------------------------------------------------------------------- -Dependency : google.golang.org/genproto -Version: v0.0.0-20230110181048-76db0878b65f +Dependency : google.golang.org/genproto/googleapis/rpc +Version: v0.0.0-20230711160842-782d3b101e98 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20230110181048-76db0878b65f/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/rpc@v0.0.0-20230711160842-782d3b101e98/LICENSE: Apache License diff --git a/go.mod b/go.mod index 48c55e7e5a7..4cab4536513 100644 --- a/go.mod +++ b/go.mod @@ -53,16 +53,16 @@ require ( go.elastic.co/ecszap v1.0.1 go.elastic.co/go-licence-detector v0.5.0 go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.7.0 + golang.org/x/crypto v0.11.0 golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.9.0 - golang.org/x/text v0.9.0 + golang.org/x/sys v0.10.0 + golang.org/x/text v0.11.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.7.0 - google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.29.1 + google.golang.org/grpc v1.58.3 + google.golang.org/protobuf v1.31.0 gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -146,11 +146,11 @@ require ( go.elastic.co/fastjson v1.1.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect - golang.org/x/term v0.7.0 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/term v0.10.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767 // indirect gopkg.in/inf.v0 v0.9.1 // indirect howett.net/plist v1.0.0 // indirect diff --git a/go.sum b/go.sum index d176fb612e9..c44d94e86b4 100644 --- a/go.sum +++ b/go.sum @@ -1905,8 +1905,9 @@ golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4 golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -2039,8 +2040,8 @@ golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2067,8 +2068,9 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2237,8 +2239,8 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2253,8 +2255,8 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2270,8 +2272,8 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2558,8 +2560,8 @@ google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZV google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2605,8 +2607,8 @@ google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767 h1:r16FSFCMhn7+LU8CzbtAIKppYeU6NUPJVdvXeIqVIq8= google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767/go.mod h1:wKDg0brwMZpaizQ1i7IzYcJjH1TmbJudYdnQC9+J+LE= @@ -2625,8 +2627,8 @@ 
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 3d7b3c14584eb517b5dafb155dfcc10dd6ade6d9 Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Mon, 23 Oct 2023 10:55:45 +0300 Subject: [PATCH 13/15] Fixing script to read ascii code (#3556) * Fixing script to read ascii code * Changing to read from version.go * Adding workspace to makefile * Adding shell on top of makefile --- deploy/kubernetes/Makefile | 8 +++++--- deploy/kubernetes/elastic-agent-managed-kubernetes.yaml | 2 +- .../kubernetes/elastic-agent-standalone-kubernetes.yaml | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index e4d8b474908..6247c9461e1 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -1,7 +1,9 @@ -ALL=elastic-agent-standalone elastic-agent-managed +SHELL := /bin/bash -BEAT_VERSION=$(shell grep '^:stack-version:' ../../version/docs/version.asciidoc | cut -c 17- ) -BRANCH_VERSION=$(shell grep '^:doc-branch:' ../../version/docs/version.asciidoc | cut -c 14- ) +ALL=elastic-agent-standalone elastic-agent-managed +WORKSPACE=$(shell git rev-parse --show-toplevel) +BEAT_VERSION=$(shell grep -oE '[0-9]+\.[0-9]+\.[0-9]+(\-[a-zA-Z]+[0-9]+)?' "${WORKSPACE}/version/version.go") +BRANCH_VERSION=$(shell cut -d. -f1-2 <<< '${BEAT_VERSION}') #variables needed for ci-create-kubernetes-templates-pull-request ELASTIC_AGENT_REPO=kibana diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 4e9399ec6c6..44df212a629 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -30,7 +30,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent - image: docker.elastic.co/beats/elastic-agent:8.8.1 + image: docker.elastic.co/beats/elastic-agent:8.12.0 env: # Set to 1 for enrollment into Fleet server. 
If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 244003075e0..125936a37b3 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -683,13 +683,13 @@ spec: # - -c # - >- # mkdir -p /etc/elastic-agent/inputs.d && - # wget -O - https://github.com/elastic/elastic-agent/archive/main.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # wget -O - https://github.com/elastic/elastic-agent/archive/8.12.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-8.12/deploy/kubernetes/elastic-agent-standalone/templates.d" # volumeMounts: # - name: external-inputs # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone - image: docker.elastic.co/beats/elastic-agent:8.8.1 + image: docker.elastic.co/beats/elastic-agent:8.12.0 args: ["-c", "/etc/elastic-agent/agent.yml", "-e"] env: # The basic authentication username used to connect to Elasticsearch From 97d9c80fadf3bac7e18f94e86225b01a81121223 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Paolo=20Chil=C3=A0?= Date: Mon, 23 Oct 2023 14:10:53 +0200 Subject: [PATCH 14/15] Fix double stop components (#3482) * Skip stopping already stopped components --- ...92-Prevent-multiple-stops-of-services.yaml | 32 +++++++++++++++++++ pkg/component/runtime/manager.go | 3 ++ pkg/component/runtime/runtime.go | 4 +++ pkg/component/runtime/service.go | 10 ++++++ 4 files changed, 49 insertions(+) create mode 100644 changelog/fragments/1695920792-Prevent-multiple-stops-of-services.yaml diff --git a/changelog/fragments/1695920792-Prevent-multiple-stops-of-services.yaml b/changelog/fragments/1695920792-Prevent-multiple-stops-of-services.yaml new file mode 100644 index 00000000000..e15f5d6e927 --- /dev/null +++ b/changelog/fragments/1695920792-Prevent-multiple-stops-of-services.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Prevent multiple attempts to stop an already stopped service + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: runtime + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. 
+# Please provide it if you are adding a fragment for a different PR. +#pr: https://github.com/owner/repo/1234 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index 42824794aea..8462ac3c17e 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -705,6 +705,7 @@ func (m *Manager) update(model component.Model, teardown bool) error { var stoppedWg sync.WaitGroup stoppedWg.Add(len(stop)) for _, existing := range stop { + m.logger.Debugf("Stopping component %q", existing.id) _ = existing.stop(teardown, model.Signed) // stop is async, wait for operation to finish, // otherwise new instance may be started and components @@ -755,6 +756,7 @@ func (m *Manager) waitForStopped(comp *componentRuntimeState) { for { latestState := comp.getLatest() if latestState.State == client.UnitStateStopped { + m.logger.Debugf("component %q stopped.", compID) return } @@ -767,6 +769,7 @@ func (m *Manager) waitForStopped(comp *componentRuntimeState) { select { case <-timeoutCh: + m.logger.Errorf("timeout exceeded waiting for component %q to stop", compID) return case <-time.After(stopCheckRetryPeriod): } diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index a04eec804bb..feeee2c6d7e 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -187,6 +187,10 @@ func (s *componentRuntimeState) start() error { } func (s *componentRuntimeState) stop(teardown bool, signed *component.Signed) error { + if s.shuttingDown.Load() { + // already stopping + return nil + } s.shuttingDown.Store(true) if teardown { return s.runtime.Teardown(signed) diff --git a/pkg/component/runtime/service.go b/pkg/component/runtime/service.go index 2ea47e2105c..5f007924d7f 100644 --- a/pkg/component/runtime/service.go +++ b/pkg/component/runtime/service.go @@ -38,6 +38,7 @@ var ( type executeServiceCommandFunc func(ctx context.Context, log *logger.Logger, binaryPath string, spec *component.ServiceOperationsCommandSpec) error // serviceRuntime provides the command runtime for running a component as a service. +// an instance of serviceRuntime is not reused: after being stopped, it cannot be started again. 
type serviceRuntime struct { comp component.Component log *logger.Logger @@ -124,6 +125,8 @@ func (s *serviceRuntime) Run(ctx context.Context, comm Communicator) (err error) lastCheckin time.Time missedCheckins int tearingDown bool + // flag that signals if we are already stopping + stopping bool ignoreCheckins bool ) @@ -136,6 +139,13 @@ func (s *serviceRuntime) Run(ctx context.Context, comm Communicator) (err error) defer cisStop() onStop := func(am actionMode) { + if stopping { + s.log.Debugf("service %s is already stopping: skipping...", s.name()) + return + } + // the flag is set once and never reset since the serviceRuntime object + // is not supposed to be reused once it's stopping + stopping = true // Stop check-in timer s.log.Debugf("stop check-in timer for %s service", s.name()) checkinTimer.Stop() From 9ae9a4c7e39afa2ab85aac12356dd440836a41cb Mon Sep 17 00:00:00 2001 From: Davide Girardi <1390902+girodav@users.noreply.github.com> Date: Mon, 23 Oct 2023 13:53:37 +0100 Subject: [PATCH 15/15] Add assetbeat as external component (#3503) --- .../1696319263-add-assetbeat-dependency.yaml | 32 +++++++++++++++++++ dev-tools/mage/manifest/manifest.go | 1 + magefile.go | 2 ++ 3 files changed, 35 insertions(+) create mode 100644 changelog/fragments/1696319263-add-assetbeat-dependency.yaml diff --git a/changelog/fragments/1696319263-add-assetbeat-dependency.yaml b/changelog/fragments/1696319263-add-assetbeat-dependency.yaml new file mode 100644 index 00000000000..ab428367953 --- /dev/null +++ b/changelog/fragments/1696319263-add-assetbeat-dependency.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Add assetbeat among the external dependencies needed to package Elastic Agent + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: elastic-agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: https://github.com/owner/repo/1234 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: https://github.com/elastic/obs-infraobs-team/issues/1114 diff --git a/dev-tools/mage/manifest/manifest.go b/dev-tools/mage/manifest/manifest.go index 62f82eb2c87..a2c377d600c 100644 --- a/dev-tools/mage/manifest/manifest.go +++ b/dev-tools/mage/manifest/manifest.go @@ -87,6 +87,7 @@ func DownloadComponentsFromManifest(manifest string, platforms []string, platfor "beats": {"auditbeat", "filebeat", "heartbeat", "metricbeat", "osquerybeat", "packetbeat"}, "cloud-defend": {"cloud-defend"}, "cloudbeat": {"cloudbeat"}, + "assetbeat": {"assetbeat"}, "elastic-agent-shipper": {"elastic-agent-shipper"}, "endpoint-dev": {"endpoint-security"}, "fleet-server": {"fleet-server"}, diff --git a/magefile.go b/magefile.go index bf7b335cbe8..75b0f3ef93a 100644 --- a/magefile.go +++ b/magefile.go @@ -928,6 +928,7 @@ func packageAgent(platforms []string, packagingFn func()) { // https://artifacts-snapshot.elastic.co/endpoint-dev/latest/8.11.0-SNAPSHOT.json // https://artifacts-snapshot.elastic.co/fleet-server/latest/8.11.0-SNAPSHOT.json // https://artifacts-snapshot.elastic.co/prodfiler/latest/8.11.0-SNAPSHOT.json + // https://artifacts-snapshot.elastic.co/assetbeat/latest/8.11.0-SNAPSHOT.json externalBinaries := map[string]string{ "auditbeat": "beats", "filebeat": "beats", @@ -943,6 +944,7 @@ func packageAgent(platforms []string, packagingFn func()) { "pf-elastic-collector": "prodfiler", "pf-elastic-symbolizer": "prodfiler", "pf-host-agent": "prodfiler", + "assetbeat": "assetbeat", // only supporting linux/amd64 or linux/arm64 } // Only log fatal logs for logs produced using logrus. This is the global logger