From 9b9d9169c7b62845f4e3a3a9045a7a600a02a891 Mon Sep 17 00:00:00 2001 From: mmahacek Date: Mon, 7 Oct 2024 06:37:40 -0700 Subject: [PATCH 01/90] Docs: update Docker setup commands to remove container when done (#41079) --- libbeat/docs/howto/load-index-templates.asciidoc | 3 +-- libbeat/docs/shared-docker.asciidoc | 14 +++++++------- .../tab-widgets/load-dashboards-logstash.asciidoc | 4 ++-- libbeat/docs/tab-widgets/load-dashboards.asciidoc | 4 ++-- .../docs/tab-widgets/load-index-template.asciidoc | 4 ++-- 5 files changed, 14 insertions(+), 15 deletions(-) diff --git a/libbeat/docs/howto/load-index-templates.asciidoc b/libbeat/docs/howto/load-index-templates.asciidoc index 238b514614a7..3fe67ce0875d 100644 --- a/libbeat/docs/howto/load-index-templates.asciidoc +++ b/libbeat/docs/howto/load-index-templates.asciidoc @@ -150,7 +150,7 @@ ifdef::docker_platform[] ["source","sh",subs="attributes"] ---------------------------------------------------------------------- -docker run {dockerimage} setup --index-management{disable_logstash} -E 'output.elasticsearch.hosts=["localhost:9200"]' +docker run --rm {dockerimage} setup --index-management{disable_logstash} -E 'output.elasticsearch.hosts=["localhost:9200"]' ---------------------------------------------------------------------- endif::docker_platform[] @@ -354,4 +354,3 @@ endif::win_only[] PS > Invoke-RestMethod -Method Put -Uri http://localhost:9200/_data_stream/{beatname_lc}-{version} ---- endif::win_os[] - diff --git a/libbeat/docs/shared-docker.asciidoc b/libbeat/docs/shared-docker.asciidoc index 7df2bc1e940f..cbaf3935f0dd 100644 --- a/libbeat/docs/shared-docker.asciidoc +++ b/libbeat/docs/shared-docker.asciidoc @@ -73,7 +73,7 @@ and machine learning jobs. 
Run this command: ifeval::["{beatname_lc}"=="filebeat"] ["source", "sh", subs="attributes"] -------------------------------------------- -docker run \ +docker run --rm \ {dockerimage} \ setup -E setup.kibana.host=kibana:5601 \ -E output.elasticsearch.hosts=["elasticsearch:9200"] <1> <2> @@ -83,7 +83,7 @@ endif::[] ifeval::["{beatname_lc}"=="metricbeat"] ["source", "sh", subs="attributes"] -------------------------------------------- -docker run \ +docker run --rm \ {dockerimage} \ setup -E setup.kibana.host=kibana:5601 \ -E output.elasticsearch.hosts=["elasticsearch:9200"] <1> <2> @@ -93,7 +93,7 @@ endif::[] ifeval::["{beatname_lc}"=="heartbeat"] ["source", "sh", subs="attributes"] -------------------------------------------- -docker run \ +docker run --rm \ --cap-add=NET_RAW \ {dockerimage} \ setup -E setup.kibana.host=kibana:5601 \ @@ -104,7 +104,7 @@ endif::[] ifeval::["{beatname_lc}"=="packetbeat"] ["source", "sh", subs="attributes"] -------------------------------------------- -docker run \ +docker run --rm \ --cap-add=NET_ADMIN \ {dockerimage} \ setup -E setup.kibana.host=kibana:5601 \ @@ -115,7 +115,7 @@ endif::[] ifeval::["{beatname_lc}"=="auditbeat"] ["source", "sh", subs="attributes"] -------------------------------------------- -docker run \ +docker run --rm \ --cap-add="AUDIT_CONTROL" \ --cap-add="AUDIT_READ" \ {dockerimage} \ @@ -139,7 +139,7 @@ endif::apm-server[] ==== Run {beatname_uc} on a read-only file system -If you'd like to run {beatname_uc} in a Docker container on a read-only file +If you'd like to run {beatname_uc} in a Docker container on a read-only file system, you can do so by specifying the `--read-only` option. 
{beatname_uc} requires a stateful directory to store application data, so with the `--read-only` option you also need to use the `--mount` option to @@ -185,7 +185,7 @@ docker run -d \ --volume="$(pwd)/{beatname_lc}.docker.yml:/usr/share/{beatname_lc}/{beatname_lc}.yml:ro" \ --volume="/var/lib/docker/containers:/var/lib/docker/containers:ro" \ --volume="/var/run/docker.sock:/var/run/docker.sock:ro" \ - --volume="registry:/usr/share/{beatname_lc}/data:rw" \ + --volume="registry:/usr/share/{beatname_lc}/data:rw" \ {dockerimage} {beatname_lc} -e --strict.perms=false \ -E output.elasticsearch.hosts=["elasticsearch:9200"] <1> <2> -------------------------------------------- diff --git a/libbeat/docs/tab-widgets/load-dashboards-logstash.asciidoc b/libbeat/docs/tab-widgets/load-dashboards-logstash.asciidoc index 141a9e7b0ab6..e4327b78afc3 100644 --- a/libbeat/docs/tab-widgets/load-dashboards-logstash.asciidoc +++ b/libbeat/docs/tab-widgets/load-dashboards-logstash.asciidoc @@ -49,7 +49,7 @@ // tag::docker[] ["source","sh",subs="attributes"] ---- -docker run --net="host" {dockerimage} setup -e \ +docker run --rm --net="host" {dockerimage} setup -e \ -E output.logstash.enabled=false \ -E output.elasticsearch.hosts=['localhost:9200'] \ -E output.elasticsearch.username={beat_default_index_prefix}_internal \ @@ -73,4 +73,4 @@ PS > .{backslash}{beatname_lc}.exe setup -e ` -E output.elasticsearch.password={pwd} ` -E setup.kibana.host=localhost:5601 ---- -// end::win[] \ No newline at end of file +// end::win[] diff --git a/libbeat/docs/tab-widgets/load-dashboards.asciidoc b/libbeat/docs/tab-widgets/load-dashboards.asciidoc index b204f7143d86..34b9df30ffe8 100644 --- a/libbeat/docs/tab-widgets/load-dashboards.asciidoc +++ b/libbeat/docs/tab-widgets/load-dashboards.asciidoc @@ -29,7 +29,7 @@ // tag::docker[] ["source","sh",subs="attributes"] ---- -docker run --net="host" {dockerimage} setup --dashboards +docker run --rm --net="host" {dockerimage} setup --dashboards ---- // 
end::docker[] @@ -45,4 +45,4 @@ and run: ---- PS > .{backslash}{beatname_lc}.exe setup --dashboards ---- -// end::win[] \ No newline at end of file +// end::win[] diff --git a/libbeat/docs/tab-widgets/load-index-template.asciidoc b/libbeat/docs/tab-widgets/load-index-template.asciidoc index c28544532c0d..40914f378f1e 100644 --- a/libbeat/docs/tab-widgets/load-index-template.asciidoc +++ b/libbeat/docs/tab-widgets/load-index-template.asciidoc @@ -29,7 +29,7 @@ // tag::docker[] ["source","sh",subs="attributes"] ---- -docker run --net="host" {dockerimage} setup --index-management +docker run --rm --net="host" {dockerimage} setup --index-management ---- // end::docker[] @@ -45,4 +45,4 @@ and run: ---- PS > .{backslash}{beatname_lc}.exe setup --index-management ---- -// end::win[] \ No newline at end of file +// end::win[] From b7d7feced818f999192cf57afa38ec53e39c3456 Mon Sep 17 00:00:00 2001 From: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> Date: Mon, 7 Oct 2024 09:57:33 -0400 Subject: [PATCH 02/90] Fix read-only instructions for running Beats on Docker (#41120) * Fix read-only instructions for running Beats on Docker * Fix up command --- libbeat/docs/shared-docker.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libbeat/docs/shared-docker.asciidoc b/libbeat/docs/shared-docker.asciidoc index cbaf3935f0dd..da5088072b03 100644 --- a/libbeat/docs/shared-docker.asciidoc +++ b/libbeat/docs/shared-docker.asciidoc @@ -149,9 +149,9 @@ For example: ["source", "sh", subs="attributes"] -------------------------------------------- -docker run \ - --mount type=source=$(pwd}/data,destination=/usr/share/{beatname_lc}/data - --read-only +docker run --rm \ + --mount type=bind,source=$(pwd)/data,destination=/usr/share/{beatname_lc}/data \ + --read-only \ {dockerimage} -------------------------------------------- From 60bdc6a402154dfbf263961431207787933e660b Mon Sep 17 00:00:00 2001 From: Lee E Hinman 
<57081003+leehinman@users.noreply.github.com> Date: Mon, 7 Oct 2024 12:55:22 -0500 Subject: [PATCH 03/90] Revert "refactor fleet mode detection and storage" (#41151) * Revert "refactor fleet mode detection and storage (#40667)" This reverts commit 33096202ff90c79daeb59447e61ff3430a6b0ea2. * fix import --- libbeat/cfgfile/cfgfile.go | 23 --------------- libbeat/common/fleetmode/fleet_mode.go | 39 ++++++++++++++++++-------- 2 files changed, 27 insertions(+), 35 deletions(-) diff --git a/libbeat/cfgfile/cfgfile.go b/libbeat/cfgfile/cfgfile.go index f77325109779..2b88aaad1577 100644 --- a/libbeat/cfgfile/cfgfile.go +++ b/libbeat/cfgfile/cfgfile.go @@ -112,8 +112,6 @@ func GetDefaultCfgfile() string { } // HandleFlags adapts default config settings based on command line flags. -// This also stores if -E management.enabled=true was set on command line -// to determine if running the Beat under agent. func HandleFlags() error { // default for the home path is the binary location home, err := filepath.Abs(filepath.Dir(os.Args[0])) @@ -131,27 +129,6 @@ func HandleFlags() error { common.PrintConfigDebugf(overwrites, "CLI setting overwrites (-E flag):") } - // Enable check to see if beat is running under Agent - // This is stored in a package so the modules which don't have - // access to the config can check this value. 
- type management struct { - Enabled bool `config:"management.enabled"` - } - var managementSettings management - cfgFlag := flag.Lookup("E") - if cfgFlag == nil { - fleetmode.SetAgentMode(false) - return nil - } - cfgObject, _ := cfgFlag.Value.(*config.SettingsFlag) - cliCfg := cfgObject.Config() - - err = cliCfg.Unpack(&managementSettings) - if err != nil { - fleetmode.SetAgentMode(false) - return nil //nolint:nilerr // unpacking failing isn't an error for this case - } - fleetmode.SetAgentMode(managementSettings.Enabled) return nil } diff --git a/libbeat/common/fleetmode/fleet_mode.go b/libbeat/common/fleetmode/fleet_mode.go index 97a17804f647..af179b887eac 100644 --- a/libbeat/common/fleetmode/fleet_mode.go +++ b/libbeat/common/fleetmode/fleet_mode.go @@ -17,18 +17,33 @@ package fleetmode -var managementEnabled bool - -// SetAgentMode stores if the Beat is running under Elastic Agent. -// Normally this is called when the command line flags are parsed. -// This is stored as a package level variable because some components -// (like filebeat/metricbeat modules) don't have access to the -// configuration information to determine this on their own. -func SetAgentMode(enabled bool) { - managementEnabled = enabled -} +import ( + "flag" + + "github.com/elastic/elastic-agent-libs/config" +) -// Enabled returns true if the Beat is running under Elastic Agent. 
+// Enabled checks to see if filebeat/metricbeat is running under Agent +// The management setting is stored in the main Beat runtime object, but we can't see that from a module +// So instead we check the CLI flags, since Agent starts filebeat/metricbeat with "-E", "management.enabled=true" func Enabled() bool { - return managementEnabled + type management struct { + Enabled bool `config:"management.enabled"` + } + var managementSettings management + + cfgFlag := flag.Lookup("E") + if cfgFlag == nil { + return false + } + + cfgObject, _ := cfgFlag.Value.(*config.SettingsFlag) + cliCfg := cfgObject.Config() + + err := cliCfg.Unpack(&managementSettings) + if err != nil { + return false + } + + return managementSettings.Enabled } From 070db8ec1a7c6655fcbbcb75d530e803533a8a23 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 13:58:33 -0400 Subject: [PATCH 04/90] [Automation] Bump Golang version to 1.22.8 (#41139) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update Heartbeat Dockerfile Made with ❤️️ by updatecli * chore: Update Heartbeat debug Dockerfile Made with ❤️️ by updatecli * chore: Update Functionbeat Dockerfile Made with ❤️️ by updatecli * chore: Update NATS module Dockerfile Made with ❤️️ by updatecli * chore: Update Metricbeat Dockerfile Made with ❤️️ by updatecli * chore: Update .golangci.yml Made with ❤️️ by updatecli * chore: Update Auditbeat Dockerfile Made with ❤️️ by updatecli * chore: Update HTTP module Dockerfile Made with ❤️️ by updatecli * chore: Update Packetbeat Dockerfile Made with ❤️️ by updatecli * chore: Update version.asciidoc Made with ❤️️ by updatecli * chore: Update go.mod Made with ❤️️ by updatecli * chore: Update from vsphere Dockerfile Made with ❤️️ by updatecli * chore: Update Filebeat debug Dockerfile Made with ❤️️ by updatecli * chore: Update stan Dockerfile Made with ❤️️ by updatecli * chore: Update 
Metricbeat debug Dockerfile Made with ❤️️ by updatecli * Add changelog. * Go mod tidy. * Remove toolchain. --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Craig MacKenzie --- .golangci.yml | 8 ++++---- CHANGELOG.next.asciidoc | 2 +- auditbeat/Dockerfile | 2 +- dev-tools/kubernetes/filebeat/Dockerfile.debug | 2 +- dev-tools/kubernetes/heartbeat/Dockerfile.debug | 2 +- dev-tools/kubernetes/metricbeat/Dockerfile.debug | 2 +- go.mod | 2 -- heartbeat/Dockerfile | 2 +- libbeat/docs/version.asciidoc | 2 +- metricbeat/Dockerfile | 2 +- metricbeat/module/http/_meta/Dockerfile | 2 +- metricbeat/module/nats/_meta/Dockerfile | 2 +- metricbeat/module/vsphere/_meta/Dockerfile | 2 +- packetbeat/Dockerfile | 2 +- x-pack/functionbeat/Dockerfile | 2 +- x-pack/metricbeat/module/stan/_meta/Dockerfile | 2 +- 16 files changed, 18 insertions(+), 20 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 074f985f479c..52c4fb797204 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -149,7 +149,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.22.7" + go: "1.22.8" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -167,19 +167,19 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.22.7" + go: "1.22.8" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.22.7" + go: "1.22.8" # Disabled: # ST1005: error strings should not be capitalized checks: ["all", "-ST1005"] unused: # Select the Go version to target. The default is '1.13'. 
- go: "1.22.7" + go: "1.22.8" gosec: excludes: diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index ac9e08823c58..dc9d0f457ff8 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -112,10 +112,10 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Validate config of the `replace` processor {pull}40047[40047] - Allow port number 0 in the community ID flowhash processor {pull}40259[40259] - Fix handling of escaped brackets in syslog structured data. {issue}40445[40445] {pull}40446[40446] -- Update Go version to 1.22.6. {pull}40528[40528] - Aborts all active connections for Elasticsearch output. {pull}40572[40572] - Closes beat Publisher on beat stop and by the Agent manager. {pull}40572[40572] - The journald input now restarts if there is an error/crash {issue}32782[32782] {pull}40558[40558] +- Update Go version to 1.22.8. {pull}41139[41139] *Auditbeat* diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index 0545f350da60..2241aa16ad1d 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.7 +FROM golang:1.22.8 RUN \ apt-get update \ diff --git a/dev-tools/kubernetes/filebeat/Dockerfile.debug b/dev-tools/kubernetes/filebeat/Dockerfile.debug index 5532006d79cb..dd5b128cacaa 100644 --- a/dev-tools/kubernetes/filebeat/Dockerfile.debug +++ b/dev-tools/kubernetes/filebeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.22.7 as builder +FROM golang:1.22.8 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/heartbeat/Dockerfile.debug b/dev-tools/kubernetes/heartbeat/Dockerfile.debug index 0802bb167839..193516f058a7 100644 --- a/dev-tools/kubernetes/heartbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/heartbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.22.6 as builder +FROM golang:1.22.8 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git 
a/dev-tools/kubernetes/metricbeat/Dockerfile.debug b/dev-tools/kubernetes/metricbeat/Dockerfile.debug index 7fa75c27b96d..a8c567b9da95 100644 --- a/dev-tools/kubernetes/metricbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/metricbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.22.7 as builder +FROM golang:1.22.8 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/go.mod b/go.mod index 0b86bf6a6cfd..e20e3b0fc12c 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,6 @@ module github.com/elastic/beats/v7 go 1.22.0 -toolchain go1.22.7 - require ( cloud.google.com/go/bigquery v1.62.0 cloud.google.com/go/monitoring v1.20.4 diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index 3eab83d42d5c..a30644dd04a6 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.7 +FROM golang:1.22.8 RUN \ apt-get update \ diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index 38bad88776ce..468ad95c9cec 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 9.0.0 :doc-branch: main -:go-version: 1.22.7 +:go-version: 1.22.8 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index b42c67c98d1a..d163510a06e2 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.7 +FROM golang:1.22.8 COPY --from=docker:26.0.0-alpine3.19 /usr/local/bin/docker /usr/local/bin/ RUN \ diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile index e9be272e4c87..324b026ef9df 100644 --- a/metricbeat/module/http/_meta/Dockerfile +++ b/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.7 +FROM golang:1.22.8 COPY test/main.go main.go diff --git a/metricbeat/module/nats/_meta/Dockerfile b/metricbeat/module/nats/_meta/Dockerfile index 79ee1edab0ea..4dd2715b4aef 100644 --- 
a/metricbeat/module/nats/_meta/Dockerfile +++ b/metricbeat/module/nats/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG NATS_VERSION=2.0.4 FROM nats:$NATS_VERSION # build stage -FROM golang:1.22.7 AS build-env +FROM golang:1.22.8 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/nats.go.git /nats-go RUN cd /nats-go/examples/nats-bench && git checkout tags/v1.10.0 && go build . diff --git a/metricbeat/module/vsphere/_meta/Dockerfile b/metricbeat/module/vsphere/_meta/Dockerfile index 837e772303fb..374f02453f93 100644 --- a/metricbeat/module/vsphere/_meta/Dockerfile +++ b/metricbeat/module/vsphere/_meta/Dockerfile @@ -1,5 +1,5 @@ ARG VSPHERE_GOLANG_VERSION -FROM golang:1.22.7 +FROM golang:1.22.8 RUN apt-get install curl git RUN go install github.com/vmware/govmomi/vcsim@v0.30.4 diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index 30d9bdaf3fc0..e739ac9efd19 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.7 +FROM golang:1.22.8 RUN \ apt-get update \ diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index bd9bf8972e6b..f5a964939235 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.7 +FROM golang:1.22.8 RUN \ apt-get update \ diff --git a/x-pack/metricbeat/module/stan/_meta/Dockerfile b/x-pack/metricbeat/module/stan/_meta/Dockerfile index 0cf5f65b3b55..8150094a78bf 100644 --- a/x-pack/metricbeat/module/stan/_meta/Dockerfile +++ b/x-pack/metricbeat/module/stan/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG STAN_VERSION=0.15.1 FROM nats-streaming:$STAN_VERSION # build stage -FROM golang:1.22.7 AS build-env +FROM golang:1.22.8 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/stan.go.git /stan-go RUN cd /stan-go/examples/stan-bench && git checkout tags/v0.5.2 && go build . 
From 6d4fbfc3487508bb5af764b428a46259fea10909 Mon Sep 17 00:00:00 2001 From: Gabriel Pop <94497545+gpop63@users.noreply.github.com> Date: Mon, 7 Oct 2024 20:59:38 +0300 Subject: [PATCH 05/90] [metricbeat] [helper] Fix http server helper SSL config (#39405) * add changelog entry * fix TLS config * fix changelog pr id * golangci-lint fixes * mage check * fix http server ssl test * Update metricbeat/helper/server/http/http_test.go Co-authored-by: Tiago Queiroz * fix changelog --------- Co-authored-by: Tiago Queiroz --- CHANGELOG.next.asciidoc | 1 + metricbeat/helper/server/http/http.go | 16 +- metricbeat/helper/server/http/http_test.go | 178 ++++++++++++++------- 3 files changed, 131 insertions(+), 64 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index dc9d0f457ff8..46a17a8fea97 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -201,6 +201,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Remove excessive info-level logs in cgroups setup {pull}40491[40491] - Add missing ECS Cloud fields in GCP `metrics` metricset when using `exclude_labels: true` {issue}40437[40437] {pull}40467[40467] - Add AWS OwningAccount support for cross account monitoring {issue}40570[40570] {pull}40691[40691] +- Fix http server helper SSL config. 
{pull}39405[39405] *Osquerybeat* diff --git a/metricbeat/helper/server/http/http.go b/metricbeat/helper/server/http/http.go index 782b0938471a..4665a30eeab7 100644 --- a/metricbeat/helper/server/http/http.go +++ b/metricbeat/helper/server/http/http.go @@ -19,10 +19,11 @@ package http import ( "context" - "io/ioutil" + "io" "net" "net/http" "strconv" + "time" "github.com/elastic/beats/v7/metricbeat/helper/server" "github.com/elastic/beats/v7/metricbeat/mb" @@ -73,10 +74,11 @@ func getDefaultHttpServer(mb mb.BaseMetricSet) (*HttpServer, error) { } httpServer := &http.Server{ - Addr: net.JoinHostPort(config.Host, strconv.Itoa(int(config.Port))), + Addr: net.JoinHostPort(config.Host, strconv.Itoa(config.Port)), + ReadHeaderTimeout: 10 * time.Second, } if tlsConfig != nil { - httpServer.TLSConfig = tlsConfig.BuildModuleClientConfig(config.Host) + httpServer.TLSConfig = tlsConfig.BuildServerConfig(config.Host) } h.server = httpServer return h, nil @@ -126,7 +128,7 @@ func (h *HttpServer) Start() error { func (h *HttpServer) Stop() { close(h.done) h.stop() - h.server.Shutdown(h.ctx) + _ = h.server.Shutdown(h.ctx) close(h.eventQueue) } @@ -147,7 +149,7 @@ func (h *HttpServer) handleFunc(writer http.ResponseWriter, req *http.Request) { meta["Content-Type"] = contentType } - body, err := ioutil.ReadAll(req.Body) + body, err := io.ReadAll(req.Body) if err != nil { logp.Err("Error reading body: %v", err) http.Error(writer, "Unexpected error reading request payload", http.StatusBadRequest) @@ -168,9 +170,9 @@ func (h *HttpServer) handleFunc(writer http.ResponseWriter, req *http.Request) { case "GET": writer.WriteHeader(http.StatusOK) if req.TLS != nil { - writer.Write([]byte("HTTPS Server accepts data via POST")) + _, _ = writer.Write([]byte("HTTPS Server accepts data via POST")) } else { - writer.Write([]byte("HTTP Server accepts data via POST")) + _, _ = writer.Write([]byte("HTTP Server accepts data via POST")) } } diff --git a/metricbeat/helper/server/http/http_test.go 
b/metricbeat/helper/server/http/http_test.go index 7decdd821be4..9b5e8a330907 100644 --- a/metricbeat/helper/server/http/http_test.go +++ b/metricbeat/helper/server/http/http_test.go @@ -23,8 +23,9 @@ import ( "bytes" "context" "crypto/tls" + "crypto/x509" "fmt" - "io/ioutil" + "io" "net" "net/http" "strconv" @@ -141,63 +142,59 @@ func getHTTPServer(t *testing.T, host string, port int, connectionType string) ( } func prepareTLSConfig(t *testing.T, host string) *tls.Config { + certPem := []byte(`-----BEGIN CERTIFICATE----- -MIIDwTCCAqmgAwIBAgIJAONBEV813hm6MA0GCSqGSIb3DQEBCwUAMHcxCzAJBgNV -BAYTAkJSMQswCQYDVQQIDAJTUDEPMA0GA1UEBwwGRlJBTkNBMRAwDgYDVQQKDAdF -TEFTVElDMQswCQYDVQQLDAJPVTERMA8GA1UEAwwIaG9tZS5jb20xGDAWBgkqhkiG -9w0BCQEWCWV1QGV1LmNvbTAeFw0xOTAzMjYxOTMxMjhaFw0yOTAzMjMxOTMxMjha -MHcxCzAJBgNVBAYTAkJSMQswCQYDVQQIDAJTUDEPMA0GA1UEBwwGRlJBTkNBMRAw -DgYDVQQKDAdFTEFTVElDMQswCQYDVQQLDAJPVTERMA8GA1UEAwwIaG9tZS5jb20x -GDAWBgkqhkiG9w0BCQEWCWV1QGV1LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBALOJ2dxpBsQtRvs2hSuUhDsf4w6G3swFqtIXLedPvz1rNuofm75G -dA9pqXiI3hDw2ZuIJZItXE3FfVXxoE/ugsFw6cVLKrnpQ8exIv8K0JNuR22faFcR -LmDx/YLw0wmOnM2maBSaetrM5F4CwoVqDmOwZHs9fbADqthAHrbCAzNTkqnx2B4/ -RWaYPbRWlSQ7CrWQE9cNJ/WMdUjznd5H0IiV7k/cHKIbXi3+JNinCWHAACWWS3ig -DjjCZd9lHkDH6qSpNGsQU5y0eiFAiiBVPqDIdVfPRe4pC81z3Dp6Wqs0uHXHYHqB -o3YWkXngTLlMLZtIMF+pWlCJZkscgLjL/N8CAwEAAaNQME4wHQYDVR0OBBYEFBpI -Tu/9mmRqithdHZZMu5jRLHebMB8GA1UdIwQYMBaAFBpITu/9mmRqithdHZZMu5jR -LHebMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGTS+cvN/vGjbkDF -wZRG8xMeHPHzlCWKNEGwZXTTBADrjfnppW5I2f5cDZzg71+UzQSJmBmHKZd+adrW -2GA888CAT+birIE6EAwIyq7ZGe77ymRspugyb7AK46QOKApED3izxId36Tk5/a0P -QY3WOTC0Y4yvz++gbx/uviYDMoHuJl0nIEXqtT9OZ2V2GqCToJu300RV/MIRtk6s -0U1d9CRDkjNolGVbYo2VnDJbZ8LQtJHS5iDeiEztay5Cky4NvVZsbCxrgNrr3h/v -upHEJ28Q7QzMnRC7d/THI6fRW1mG6BuFT3WPW5K7EAfgQDlyyspTDrACrYTuWC+y -013uTlI= +MIIC9TCCAd2gAwIBAgIUa4hI3ZErW13j7zCXg1Ory+FhITYwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTI0MDUxNjIwNDIwMloYDzMwMjMw 
+OTE3MjA0MjAyWjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDJcUM8vV6vGTycqImCwu06NSsuIHdKukHQTuvHbRGP +kXwlXNDMYEdoUX1mPArqGFunrQ9/myWoqQA7b9MTIZl4GheHvABuw0kuRos0/t4Y +zCFRRV27ATswAYp/WVBvHRZEedLJj25x8DoMeljV9dq/JKtaNNGKgztMcqWTSFPy +c+pDSSgRiP/sDebUhRaLXUhRVMsud9Wlwf6bmn62Ocj7EgrLj75u0IAb2alQ9bL9 +cLAPAi0/KFx4nl8tCMQUXYM0PyNCkSM8wdwHcLiYNEKOtEx0Y4otiYLH98wlWJcl +AtMzHk5IexcTfCGzOk1fau3gNxbM9fH3+C8WBprm5lT5AgMBAAGjPTA7MBoGA1Ud +EQQTMBGHBH8AAAGCCWxvY2FsaG9zdDAdBgNVHQ4EFgQUjuHPOPincRSGgEC4DnOs +RGR8MW4wDQYJKoZIhvcNAQELBQADggEBAIFdEIGhjWrQMDx5bjif21XOaBr61uKU +3YnKMlX4bJrqjSy164SN0qBaurYUspam8YyC31IU3FSvulRoUVr3Y/VCpnfuDuEw +c5C2XJWvslRUTqZ4TAopj1vvt7wcFOJixfH3PMMdA8sKArWxlV4LtPN8h5Det0qG +F5D03fWQehviLetk7l/fdAElSoigGhJrb3HddfRcepvrWVpcUJEX3rdgwKh5RszN +1WTX/kA6w5o7JAylybV5JNKvzbpfQOH4MQD8306FB+xFPSZHgXUWJ9bJE/CbR5vd +onX6v9itbKD/hxMOZQ6HIn6F1fKK3JMJ77t35cJonwVHwV+/K2HJmNA= -----END CERTIFICATE-----`) - keyPem := []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAs4nZ3GkGxC1G+zaFK5SEOx/jDobezAWq0hct50+/PWs26h+b -vkZ0D2mpeIjeEPDZm4glki1cTcV9VfGgT+6CwXDpxUsquelDx7Ei/wrQk25HbZ9o -VxEuYPH9gvDTCY6czaZoFJp62szkXgLChWoOY7Bkez19sAOq2EAetsIDM1OSqfHY -Hj9FZpg9tFaVJDsKtZAT1w0n9Yx1SPOd3kfQiJXuT9wcohteLf4k2KcJYcAAJZZL -eKAOOMJl32UeQMfqpKk0axBTnLR6IUCKIFU+oMh1V89F7ikLzXPcOnpaqzS4dcdg -eoGjdhaReeBMuUwtm0gwX6laUIlmSxyAuMv83wIDAQABAoIBAD1kY/T0jPXELcN1 -LzBpxpWZH8E16TWGspTIjE/Oeyx7XvnL+SulV8Z1cRfgZV8RnLeMZJyJmkiVwXgD -+bebbWbMP4PRYjjURPMh5T+k6RGg4hfgLIOpQlywIuoFg4R/GatQvcJd2Ki861Ii -S3XngCgihxmFO1dWybLMqjQAP6vq01sbctUXYddFd5STInzrceoXwkLjp3gTR1et -FG+Anmzbxp8e2ETXvwuf7eZhVwCJ2DxBt7tx1j5Csuj1LjaVTe5qR7B1oM7/vo0b -LlY9IixAAi62Rrv4YSvMAtMI6mQt+AM/4uBVqoG/ipgkuoQVuQ+M4lGdmEXwEEkz -Ol7SlMECgYEA11tV+ZekVsujBmasTU7TfWtcYtRHh+FSC040bVLiE6XZbuVJ4sSA -TvuUDs+3XM8blnkfVo826WY4+bKkj1PdCFsmG5pm+wnSTPFKWsCtsSyA3ts85t3O -IvcCxXA/1xL9O/UdWfrl2+IJ3yLDEjEU5QTYP34+KDBZM3u6tJzjWe8CgYEA1WwA -8d75h9UQyFXWEOiwJmR6yX7PGkpYE3J7m2p2giEbLm+9no5CEmE9T74k3m0eLZug 
-g/F1MA/evhXEYho6f+lS9Q0ZdtyU2EFrdvuLlUw6FJIWnaOLlVR/aC6BvAlxLDRb -RUGqDKDjl1Die0s8F1aDHGvNvGaZRN4Z23BRPBECgYBE8pMGA8yzlSKui/SiE5iW -UOcVJQ15rWPNBs62KZED5VdFr9cF6Q+DOfxe+ZWk+xHEDSdBWTylYPrgxpb05E6h -vDzpHXfW64AO7jl18LYrQSpJLzvCVkUG4LpcZ+GohAXbSlCJXFB3I1kxvTli+5/K -6tApE8vmpgQI/ZX6+Te4tQKBgBcQ3C1H5voaOf0c4czkCR2tIGQkk2eI/2nipp9O -a053G4PySbEYOOXZopG6wCtV6bwOJNP9xaeTH4S1v4rGwOnQIsofR1BEWMXilCXA -2/4fxesxOsaAxXY3Mqnk1NqovpWDdxXOGf3RaaeR81hV8kGndPYeZJbnE8uQoYTI -586xAoGBAI2SR17xbgfiQBZxgGqamslz4NqBkZUBs4DIAGMAXS21rW/2bbbRaSii -mGmkdaXx+l077AuO0peX2uBvJAx6PvAVW0qroeOLcCo6EuUGTNVhBej6L9hMwhIO -r0tZLlMt75zcnJBicMbIrrzIGVYMHjT+m1QTGbrGb/tcEIGtmXwO ------END RSA PRIVATE KEY-----`) + keyPem := []byte(`-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJcUM8vV6vGTyc +qImCwu06NSsuIHdKukHQTuvHbRGPkXwlXNDMYEdoUX1mPArqGFunrQ9/myWoqQA7 +b9MTIZl4GheHvABuw0kuRos0/t4YzCFRRV27ATswAYp/WVBvHRZEedLJj25x8DoM +eljV9dq/JKtaNNGKgztMcqWTSFPyc+pDSSgRiP/sDebUhRaLXUhRVMsud9Wlwf6b +mn62Ocj7EgrLj75u0IAb2alQ9bL9cLAPAi0/KFx4nl8tCMQUXYM0PyNCkSM8wdwH +cLiYNEKOtEx0Y4otiYLH98wlWJclAtMzHk5IexcTfCGzOk1fau3gNxbM9fH3+C8W +Bprm5lT5AgMBAAECggEAEYpJsv/AP1ngs7lfI+IqOt/HT0BncrvOID/G+vntxgUC +fNRcn/cgMJ6r3xuKTcDqNir1BwTw3gM9MG+3vto1nUYUV27Q0NQzSpK861Pn7dvU +aNmz5CUizLbNovIZdVtghXzgFEnncYdb3ptGofbC4dLlErk3p6punuT6stzg5mL2 +y/2yHBrfQEnuDRI8pQ5Vcuo24GioZqWiS35qVGLbonvor0DKv4lkNjMix6ulwwb+ +3rvEAhTOhgYKe7h6RjKnc4SbIsnSpGzhC9M7hLF+F57GIw61uaJnISfkuw/FGhaR +XkeyV8TB8MDTgP30+7xam6pvB2rKcRsrVgPmLC7WgQKBgQDRHgRHDTgpBSx9F+N6 +6KU01g5cemxKVBHMm5L2n99YpR9BoiWViKkFWAWALmRlq/nFk22hq4t2+niH/6a+ +0ioAhIOnZZTXK/n5DsBCdqg1d1ZO4ih4Iw1/TR1iIR0M8ptkIBGVWKslV8OKQNd4 +zNUCmDzb8pmuzVKjwVs7ca9HmQKBgQD2msK7eh81A2dxXPl1chcudFB33zMwA1Y0 +3ZEPsGAinvU5ILwwMlg1w7N1NKwcDYiBkJG1SCoujoTsYoXMKjnlgf5uoklfJJBI +U3QKYMGDRdlqE02V31KBVcv/EdNR8olfjy1xbgCKu04rYnCPGLSLNc6MgcSMYnLr +y9rZlq5UYQKBgQCi0K4f6+j39zFGTF0vCwfl9WvFEQRTctVQ6ygnoR4yVI3bejWt +EXQX1wqhXH2Ks7WK4ViQcZHqluVVbfUTyWoucP5YTTzvsyuzgIqstNoOltW6IVfF 
+AfW2UgI4rvOBazsVX+qQzzKhpo12jTm2sjR/Cq0HywFhGjfni9pOlBsWsQKBgQDz +3IbFLja+Dee1SuPFKFWUMqGAaNANor8U+CYDBb+LfPWy0JRIdQCV6jkEplmsRBXB +Sl1Mj1hnQbhgqez1wKwQMUSR0xoLY/TqENynhpbWYbRmGUCX/IdyLo3UZqQ6XUVL +oiKmEMmoZyEd9fKpDx06rLLcb1cWHCTY2HZKxZ8PAQKBgF3ftzNurXMCBH9W2RkI +hHhpHArwSLCsDVeGpS6vYDz+EX+RP1t1jJZbTRyOkk/X5RNVA3Yup6Lw8ANWqpPJ +MMbn7YyWGaClkcuHqavOU7kfaqF5S6vECOAtSWd+NPOHUALTDnmBUnLTE4KmzarO +8hd7Y6EEu0Lwkc3GnoQUwzRh +-----END PRIVATE KEY-----`) cfg := &tls.Config{ - ServerName: host, - MinVersion: tls.VersionTLS12, - MaxVersion: tls.VersionTLS12, - InsecureSkipVerify: true, + ServerName: host, + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS12, } cfg.Certificates = make([]tls.Certificate, 1) cert, err := tls.X509KeyPair(certPem, keyPem) @@ -213,12 +210,79 @@ func writeToServer(t *testing.T, message, host string, port int, connectionMetho var str = []byte(message) req, err := http.NewRequest(connectionMethod, url, bytes.NewBuffer(str)) req.Header.Set("Content-Type", "text/plain") + client := &http.Client{} + + certPem := []byte(`-----BEGIN CERTIFICATE----- +MIIC9TCCAd2gAwIBAgIUa4hI3ZErW13j7zCXg1Ory+FhITYwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTI0MDUxNjIwNDIwMloYDzMwMjMw +OTE3MjA0MjAyWjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDJcUM8vV6vGTycqImCwu06NSsuIHdKukHQTuvHbRGP +kXwlXNDMYEdoUX1mPArqGFunrQ9/myWoqQA7b9MTIZl4GheHvABuw0kuRos0/t4Y +zCFRRV27ATswAYp/WVBvHRZEedLJj25x8DoMeljV9dq/JKtaNNGKgztMcqWTSFPy +c+pDSSgRiP/sDebUhRaLXUhRVMsud9Wlwf6bmn62Ocj7EgrLj75u0IAb2alQ9bL9 +cLAPAi0/KFx4nl8tCMQUXYM0PyNCkSM8wdwHcLiYNEKOtEx0Y4otiYLH98wlWJcl +AtMzHk5IexcTfCGzOk1fau3gNxbM9fH3+C8WBprm5lT5AgMBAAGjPTA7MBoGA1Ud +EQQTMBGHBH8AAAGCCWxvY2FsaG9zdDAdBgNVHQ4EFgQUjuHPOPincRSGgEC4DnOs +RGR8MW4wDQYJKoZIhvcNAQELBQADggEBAIFdEIGhjWrQMDx5bjif21XOaBr61uKU +3YnKMlX4bJrqjSy164SN0qBaurYUspam8YyC31IU3FSvulRoUVr3Y/VCpnfuDuEw +c5C2XJWvslRUTqZ4TAopj1vvt7wcFOJixfH3PMMdA8sKArWxlV4LtPN8h5Det0qG 
+F5D03fWQehviLetk7l/fdAElSoigGhJrb3HddfRcepvrWVpcUJEX3rdgwKh5RszN +1WTX/kA6w5o7JAylybV5JNKvzbpfQOH4MQD8306FB+xFPSZHgXUWJ9bJE/CbR5vd +onX6v9itbKD/hxMOZQ6HIn6F1fKK3JMJ77t35cJonwVHwV+/K2HJmNA= +-----END CERTIFICATE-----`) + + keyPem := []byte(`-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJcUM8vV6vGTyc +qImCwu06NSsuIHdKukHQTuvHbRGPkXwlXNDMYEdoUX1mPArqGFunrQ9/myWoqQA7 +b9MTIZl4GheHvABuw0kuRos0/t4YzCFRRV27ATswAYp/WVBvHRZEedLJj25x8DoM +eljV9dq/JKtaNNGKgztMcqWTSFPyc+pDSSgRiP/sDebUhRaLXUhRVMsud9Wlwf6b +mn62Ocj7EgrLj75u0IAb2alQ9bL9cLAPAi0/KFx4nl8tCMQUXYM0PyNCkSM8wdwH +cLiYNEKOtEx0Y4otiYLH98wlWJclAtMzHk5IexcTfCGzOk1fau3gNxbM9fH3+C8W +Bprm5lT5AgMBAAECggEAEYpJsv/AP1ngs7lfI+IqOt/HT0BncrvOID/G+vntxgUC +fNRcn/cgMJ6r3xuKTcDqNir1BwTw3gM9MG+3vto1nUYUV27Q0NQzSpK861Pn7dvU +aNmz5CUizLbNovIZdVtghXzgFEnncYdb3ptGofbC4dLlErk3p6punuT6stzg5mL2 +y/2yHBrfQEnuDRI8pQ5Vcuo24GioZqWiS35qVGLbonvor0DKv4lkNjMix6ulwwb+ +3rvEAhTOhgYKe7h6RjKnc4SbIsnSpGzhC9M7hLF+F57GIw61uaJnISfkuw/FGhaR +XkeyV8TB8MDTgP30+7xam6pvB2rKcRsrVgPmLC7WgQKBgQDRHgRHDTgpBSx9F+N6 +6KU01g5cemxKVBHMm5L2n99YpR9BoiWViKkFWAWALmRlq/nFk22hq4t2+niH/6a+ +0ioAhIOnZZTXK/n5DsBCdqg1d1ZO4ih4Iw1/TR1iIR0M8ptkIBGVWKslV8OKQNd4 +zNUCmDzb8pmuzVKjwVs7ca9HmQKBgQD2msK7eh81A2dxXPl1chcudFB33zMwA1Y0 +3ZEPsGAinvU5ILwwMlg1w7N1NKwcDYiBkJG1SCoujoTsYoXMKjnlgf5uoklfJJBI +U3QKYMGDRdlqE02V31KBVcv/EdNR8olfjy1xbgCKu04rYnCPGLSLNc6MgcSMYnLr +y9rZlq5UYQKBgQCi0K4f6+j39zFGTF0vCwfl9WvFEQRTctVQ6ygnoR4yVI3bejWt +EXQX1wqhXH2Ks7WK4ViQcZHqluVVbfUTyWoucP5YTTzvsyuzgIqstNoOltW6IVfF +AfW2UgI4rvOBazsVX+qQzzKhpo12jTm2sjR/Cq0HywFhGjfni9pOlBsWsQKBgQDz +3IbFLja+Dee1SuPFKFWUMqGAaNANor8U+CYDBb+LfPWy0JRIdQCV6jkEplmsRBXB +Sl1Mj1hnQbhgqez1wKwQMUSR0xoLY/TqENynhpbWYbRmGUCX/IdyLo3UZqQ6XUVL +oiKmEMmoZyEd9fKpDx06rLLcb1cWHCTY2HZKxZ8PAQKBgF3ftzNurXMCBH9W2RkI +hHhpHArwSLCsDVeGpS6vYDz+EX+RP1t1jJZbTRyOkk/X5RNVA3Yup6Lw8ANWqpPJ +MMbn7YyWGaClkcuHqavOU7kfaqF5S6vECOAtSWd+NPOHUALTDnmBUnLTE4KmzarO +8hd7Y6EEu0Lwkc3GnoQUwzRh +-----END PRIVATE KEY-----`) + + certPool := 
x509.NewCertPool() + if ok := certPool.AppendCertsFromPEM(certPem); !ok { + t.Fatal("failed to append server certificate to the pool") + } + + cfg := &tls.Config{ + ServerName: host, + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS12, + RootCAs: certPool, + } + cfg.Certificates = make([]tls.Certificate, 1) + cert, err := tls.X509KeyPair(certPem, keyPem) + if err != nil { + t.Error(err) + } + cfg.Certificates = []tls.Certificate{cert} + if connectionType == "HTTPS" { client.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, // test server certificate is not trusted. - }} + TLSClientConfig: cfg, + } } resp, err := client.Do(req) if err != nil { @@ -230,7 +294,7 @@ func writeToServer(t *testing.T, message, host string, port int, connectionMetho if connectionMethod == "GET" { if resp.StatusCode == http.StatusOK { - bodyBytes, err2 := ioutil.ReadAll(resp.Body) + bodyBytes, err2 := io.ReadAll(resp.Body) if err2 != nil { t.Error(err) t.FailNow() From 2ff38df906203ba041d1f5492a6007809ad0b066 Mon Sep 17 00:00:00 2001 From: Lee E Hinman <57081003+leehinman@users.noreply.github.com> Date: Mon, 7 Oct 2024 15:16:40 -0500 Subject: [PATCH 06/90] Revert "Don't register cfgfile flags on `flag.CommandLine` in `func init()`" (#41152) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Revert "Don't register cfgfile flags on `flag.CommandLine` in `func init()` (…" This reverts commit 07360ca6f8df2fdd4e0a4d0e180c65967c96ff51. 
* linter fixes --- libbeat/cfgfile/cfgfile.go | 21 +++---------------- libbeat/cmd/instance/beat_integration_test.go | 9 ++++---- libbeat/cmd/root.go | 3 --- 3 files changed, 8 insertions(+), 25 deletions(-) diff --git a/libbeat/cfgfile/cfgfile.go b/libbeat/cfgfile/cfgfile.go index 2b88aaad1577..ca19af8cb9f6 100644 --- a/libbeat/cfgfile/cfgfile.go +++ b/libbeat/cfgfile/cfgfile.go @@ -18,11 +18,9 @@ package cfgfile import ( - "flag" "fmt" "os" "path/filepath" - "sync" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/fleetmode" @@ -35,10 +33,8 @@ var ( // The default config cannot include the beat name as it is not initialized // when this variable is created. See ChangeDefaultCfgfileFlag which should // be called prior to flags.Parse(). - commandLine flag.FlagSet - commandLineOnce sync.Once - configfiles = config.StringArrFlag(&commandLine, "c", "beat.yml", "Configuration file, relative to path.config") - overwrites = config.SettingFlag(&commandLine, "E", "Configuration overwrite") + configfiles = config.StringArrFlag(nil, "c", "beat.yml", "Configuration file, relative to path.config") + overwrites = config.SettingFlag(nil, "E", "Configuration overwrite") // Additional default settings, that must be available for variable expansion defaults = config.MustNewConfigFrom(map[string]interface{}{ @@ -58,7 +54,7 @@ var ( func init() { // add '-path.x' options overwriting paths in 'overwrites' config makePathFlag := func(name, usage string) *string { - return config.ConfigOverwriteFlag(&commandLine, overwrites, name, name, "", usage) + return config.ConfigOverwriteFlag(nil, overwrites, name, name, "", usage) } homePath = makePathFlag("path.home", "Home path") @@ -67,17 +63,6 @@ func init() { makePathFlag("path.logs", "Logs path") } -// InitFlags is for explicitly initializing the flags. -// It may get called repeatedly for different flagsets, but not -// twice for the same one. 
-func InitFlags() { - commandLineOnce.Do(func() { - commandLine.VisitAll(func(f *flag.Flag) { - flag.CommandLine.Var(f.Value, f.Name, f.Usage) - }) - }) -} - // OverrideChecker checks if a config should be overwritten. type OverrideChecker func(*config.C) bool diff --git a/libbeat/cmd/instance/beat_integration_test.go b/libbeat/cmd/instance/beat_integration_test.go index 4c2aa94bd1d4..baf7657665d2 100644 --- a/libbeat/cmd/instance/beat_integration_test.go +++ b/libbeat/cmd/instance/beat_integration_test.go @@ -27,7 +27,6 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/mock" "github.com/elastic/elastic-agent-libs/config" @@ -79,7 +78,6 @@ func (mb mockbeat) Stop() { } func TestMonitoringNameFromConfig(t *testing.T) { - mockBeat := mockbeat{ done: make(chan struct{}), initDone: make(chan struct{}), @@ -93,8 +91,6 @@ func TestMonitoringNameFromConfig(t *testing.T) { go func() { defer wg.Done() - // Initialize cfgfile flags - cfgfile.InitFlags() // Set the configuration file path flag so the beat can read it _ = flag.Set("c", "testdata/mockbeat.yml") _ = instance.Run(mock.Settings, func(_ *beat.Beat, _ *config.C) (beat.Beater, error) { @@ -118,10 +114,15 @@ func TestMonitoringNameFromConfig(t *testing.T) { if err != nil { t.Fatalf("error creating request: %v", err) } + resp, err := http.DefaultClient.Do(req) if err != nil { t.Fatalf("calling state endpoint: %v", err) } + + if err != nil { + t.Fatal("calling state endpoint: ", err.Error()) + } defer resp.Body.Close() beatName := struct { diff --git a/libbeat/cmd/root.go b/libbeat/cmd/root.go index 671b72fde1ec..589d706fc615 100644 --- a/libbeat/cmd/root.go +++ b/libbeat/cmd/root.go @@ -81,9 +81,6 @@ func GenRootCmdWithSettings(beatCreator beat.Creator, settings instance.Settings panic(fmt.Errorf("failed to set default config file path: %w", err)) } - // Initialize the 
configuration flags. - cfgfile.InitFlags() - // must be updated prior to CLI flag handling. rootCmd.RunCmd = genRunCmd(settings, beatCreator) From 2acdba9b575ddd74f1e5909859fea8dbff850ff4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 20:47:45 +0000 Subject: [PATCH 07/90] [Automation] Bump Golang version to 1.22.8 (#41160) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update .go-version Made with ❤️️ by updatecli * chore: Update go.mod Made with ❤️️ by updatecli * Fix go mod tidy output. --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Craig MacKenzie --- .go-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.go-version b/.go-version index 87b26e8b1aa0..229a27c6f204 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.7 +1.22.8 From 904d14b664b14990a84905a06a63fe0e6f5ab650 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Mon, 7 Oct 2024 16:23:18 -0600 Subject: [PATCH 08/90] [aws] Change log.file.path field in awscloudwatch input to nested object (#41099) Change log.file.path field in awscloudwatch input to nested object --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/awscloudwatch/input_test.go | 6 +++++- x-pack/filebeat/input/awscloudwatch/processor.go | 8 ++++++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 46a17a8fea97..3ad14f0f60e2 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -44,6 +44,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Journald: `seek` and `since` behaviour have been simplified, if there is a cursor (state) `seek` and `since` are ignored and the cursor is used. 
{pull}40061[40061] - Redis: Added replication role as a field to submitted slowlogs - Added `container.image.name` to `journald` Filebeat input's Docker-specific translated fields. {pull}40450[40450] +- Change log.file.path field in awscloudwatch input to nested object. {pull}41099[41099] - Remove deprecated awscloudwatch field from Filebeat. {pull}41089[41089] diff --git a/x-pack/filebeat/input/awscloudwatch/input_test.go b/x-pack/filebeat/input/awscloudwatch/input_test.go index ebdd74c438e1..25ecc18ea57c 100644 --- a/x-pack/filebeat/input/awscloudwatch/input_test.go +++ b/x-pack/filebeat/input/awscloudwatch/input_test.go @@ -29,7 +29,11 @@ func TestCreateEvent(t *testing.T) { "event": mapstr.M{ "id": *logEvent.EventId, }, - "log.file.path": "logGroup1" + "/" + *logEvent.LogStreamName, + "log": mapstr.M{ + "file": mapstr.M{ + "path": "logGroup1" + "/" + *logEvent.LogStreamName, + }, + }, "aws.cloudwatch": mapstr.M{ "log_group": "logGroup1", "log_stream": *logEvent.LogStreamName, diff --git a/x-pack/filebeat/input/awscloudwatch/processor.go b/x-pack/filebeat/input/awscloudwatch/processor.go index 8ddacc16b233..818ba85d57ec 100644 --- a/x-pack/filebeat/input/awscloudwatch/processor.go +++ b/x-pack/filebeat/input/awscloudwatch/processor.go @@ -44,8 +44,12 @@ func createEvent(logEvent types.FilteredLogEvent, logGroup string, regionName st event := beat.Event{ Timestamp: time.Unix(*logEvent.Timestamp/1000, 0).UTC(), Fields: mapstr.M{ - "message": *logEvent.Message, - "log.file.path": logGroup + "/" + *logEvent.LogStreamName, + "message": *logEvent.Message, + "log": mapstr.M{ + "file": mapstr.M{ + "path": logGroup + "/" + *logEvent.LogStreamName, + }, + }, "event": mapstr.M{ "id": *logEvent.EventId, "ingested": time.Now(), From 485b83a5735aa869f31d68d8c3d51b911faa647f Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Tue, 8 Oct 2024 14:56:54 +1030 Subject: [PATCH 09/90] x-pack/filebeat/input/azureblobstorage: add support for CSV decoding (#40978) The test file 
txn.csv.gz was obtained from https://netskopepartnerlogfilebucket.s3.amazonaws.com/txn-1722875066329034-fe10b6a23cc643c4b282e6190de2352d.csv.gz --- CHANGELOG.next.asciidoc | 1 + .../inputs/input-azure-blob-storage.asciidoc | 55 ++ .../filebeat/input/azureblobstorage/config.go | 9 + .../input/azureblobstorage/decoding.go | 47 ++ .../input/azureblobstorage/decoding_config.go | 54 ++ .../input/azureblobstorage/decoding_csv.go | 139 ++++ .../input/azureblobstorage/decoding_test.go | 237 +++++++ .../filebeat/input/azureblobstorage/input.go | 2 + .../input/azureblobstorage/input_stateless.go | 1 + x-pack/filebeat/input/azureblobstorage/job.go | 70 ++- .../input/azureblobstorage/testdata/txn.csv | 5 + .../azureblobstorage/testdata/txn.csv.gz | Bin 0 -> 2527 bytes .../input/azureblobstorage/testdata/txn.json | 594 ++++++++++++++++++ .../filebeat/input/azureblobstorage/types.go | 1 + 14 files changed, 1195 insertions(+), 20 deletions(-) create mode 100644 x-pack/filebeat/input/azureblobstorage/decoding.go create mode 100644 x-pack/filebeat/input/azureblobstorage/decoding_config.go create mode 100644 x-pack/filebeat/input/azureblobstorage/decoding_csv.go create mode 100644 x-pack/filebeat/input/azureblobstorage/decoding_test.go create mode 100644 x-pack/filebeat/input/azureblobstorage/testdata/txn.csv create mode 100644 x-pack/filebeat/input/azureblobstorage/testdata/txn.csv.gz create mode 100644 x-pack/filebeat/input/azureblobstorage/testdata/txn.json diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 3ad14f0f60e2..b36bf002ffe1 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -317,6 +317,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add support to CEL for reading host environment variables. {issue}40762[40762] {pull}40779[40779] - Add CSV decoder to awss3 input. {pull}40896[40896] - Change request trace logging to include headers instead of complete request. 
{pull}41072[41072] +- Add CSV decoding capacity to azureblobstorage input {pull}40978[40978] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-azure-blob-storage.asciidoc b/x-pack/filebeat/docs/inputs/input-azure-blob-storage.asciidoc index d6c1b4c90501..0ee02cf91d78 100644 --- a/x-pack/filebeat/docs/inputs/input-azure-blob-storage.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-azure-blob-storage.asciidoc @@ -247,6 +247,61 @@ Example : `10s` would mean we would like the polling to occur every 10 seconds. This attribute can be specified both at the root level of the configuration as well at the container level. The container level values will always take priority and override the root level values if both are specified. +[id="input-{type}-encoding"] +[float] +==== `encoding` + +The file encoding to use for reading data that contains international +characters. This only applies to non-JSON logs. See <<_encoding_3>>. + +[id="input-{type}-decoding"] +[float] +==== `decoding` + +The file decoding option is used to specify a codec that will be used to +decode the file contents. This can apply to any file stream data. +An example config is shown below: + +Currently supported codecs are given below:- + + 1. <>: This codec decodes RFC 4180 CSV data streams. + +[id="attrib-decoding-csv-azureblobstorage"] +[float] +==== `the CSV codec` +The `CSV` codec is used to decode RFC 4180 CSV data streams. +Enabling the codec without other options will use the default codec options. + +[source,yaml] +---- + decoding.codec.csv.enabled: true +---- + +The CSV codec supports five sub attributes to control aspects of CSV decoding. +The `comma` attribute specifies the field separator character used by the CSV +format. If it is not specified, the comma character '`,`' is used. The `comment` +attribute specifies the character that should be interpreted as a comment mark. +If it is specified, lines starting with the character will be ignored. 
Both +`comma` and `comment` must be single characters. The `lazy_quotes` attribute +controls how quoting in fields is handled. If `lazy_quotes` is true, a quote may +appear in an unquoted field and a non-doubled quote may appear in a quoted field. +The `trim_leading_space` attribute specifies that leading white space should be +ignored, even if the `comma` character is white space. For complete details +of the preceding configuration attribute behaviors, see the CSV decoder +https://pkg.go.dev/encoding/csv#Reader[documentation] The `fields_names` +attribute can be used to specify the column names for the data. If it is +absent, the field names are obtained from the first non-comment line of +data. The number of fields must match the number of field names. + +An example config is shown below: + +[source,yaml] +---- + decoding.codec.csv.enabled: true + decoding.codec.csv.comma: "\t" + decoding.codec.csv.comment: "#" +---- + [id="attrib-file_selectors"] [float] ==== `file_selectors` diff --git a/x-pack/filebeat/input/azureblobstorage/config.go b/x-pack/filebeat/input/azureblobstorage/config.go index 5367596935a1..5923e8ce7bb9 100644 --- a/x-pack/filebeat/input/azureblobstorage/config.go +++ b/x-pack/filebeat/input/azureblobstorage/config.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/elastic/beats/v7/libbeat/common/match" + "github.com/elastic/beats/v7/libbeat/reader/parser" ) // MaxWorkers, Poll, PollInterval, FileSelectors, TimeStampEpoch & ExpandEventListFromField can @@ -25,6 +26,7 @@ type config struct { PollInterval *time.Duration `config:"poll_interval"` Containers []container `config:"containers" validate:"required"` FileSelectors []fileSelectorConfig `config:"file_selectors"` + ReaderConfig readerConfig `config:",inline"` TimeStampEpoch *int64 `config:"timestamp_epoch"` ExpandEventListFromField string `config:"expand_event_list_from_field"` } @@ -36,6 +38,7 @@ type container struct { Poll *bool `config:"poll"` 
PollInterval *time.Duration `config:"poll_interval"` FileSelectors []fileSelectorConfig `config:"file_selectors"` + ReaderConfig readerConfig `config:",inline"` TimeStampEpoch *int64 `config:"timestamp_epoch"` ExpandEventListFromField string `config:"expand_event_list_from_field"` } @@ -46,6 +49,12 @@ type fileSelectorConfig struct { // TODO: Add support for reader config in future } +// readerConfig defines the options for reading the content of an azure container. +type readerConfig struct { + Parsers parser.Config `config:",inline"` + Decoding decoderConfig `config:"decoding"` +} + type authConfig struct { SharedCredentials *sharedKeyConfig `config:"shared_credentials"` ConnectionString *connectionStringConfig `config:"connection_string"` diff --git a/x-pack/filebeat/input/azureblobstorage/decoding.go b/x-pack/filebeat/input/azureblobstorage/decoding.go new file mode 100644 index 000000000000..77f0a1e8c2c7 --- /dev/null +++ b/x-pack/filebeat/input/azureblobstorage/decoding.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package azureblobstorage + +import ( + "fmt" + "io" +) + +// decoder is an interface for decoding data from an io.Reader. +type decoder interface { + // decode reads and decodes data from an io reader based on the codec type. + // It returns the decoded data and an error if the data cannot be decoded. + decode() ([]byte, error) + // next advances the decoder to the next data item and returns true if there is more data to be decoded. + next() bool + // close closes the decoder and releases any resources associated with it. + // It returns an error if the decoder cannot be closed. + + // more returns whether there are more records to read. 
+ more() bool + + close() error +} + +// valueDecoder is a decoder that can decode directly to a JSON serialisable value. +type valueDecoder interface { + decoder + + decodeValue() ([]byte, map[string]any, error) +} + +// newDecoder creates a new decoder based on the codec type. +// It returns a decoder type and an error if the codec type is not supported. +// If the reader config codec option is not set, it returns a nil decoder and nil error. +func newDecoder(cfg decoderConfig, r io.Reader) (decoder, error) { + switch { + case cfg.Codec == nil: + return nil, nil + case cfg.Codec.CSV != nil: + return newCSVDecoder(cfg, r) + default: + return nil, fmt.Errorf("unsupported config value: %v", cfg) + } +} diff --git a/x-pack/filebeat/input/azureblobstorage/decoding_config.go b/x-pack/filebeat/input/azureblobstorage/decoding_config.go new file mode 100644 index 000000000000..3f680873bf78 --- /dev/null +++ b/x-pack/filebeat/input/azureblobstorage/decoding_config.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package azureblobstorage + +import ( + "fmt" + "unicode/utf8" +) + +// decoderConfig contains the configuration options for instantiating a decoder. +type decoderConfig struct { + Codec *codecConfig `config:"codec"` +} + +// codecConfig contains the configuration options for different codecs used by a decoder. +type codecConfig struct { + CSV *csvCodecConfig `config:"csv"` +} + +// csvCodecConfig contains the configuration options for the CSV codec. +type csvCodecConfig struct { + Enabled bool `config:"enabled"` + + // Fields is the set of field names. If it is present + // it is used to specify the object names of returned + // values and the FieldsPerRecord field in the csv.Reader. 
+ // Otherwise, names are obtained from the first + // line of the CSV data. + Fields []string `config:"fields_names"` + + // The fields below have the same meaning as the + // fields of the same name in csv.Reader. + Comma *configRune `config:"comma"` + Comment configRune `config:"comment"` + LazyQuotes bool `config:"lazy_quotes"` + TrimLeadingSpace bool `config:"trim_leading_space"` +} + +type configRune rune + +func (r *configRune) Unpack(s string) error { + if s == "" { + return nil + } + n := utf8.RuneCountInString(s) + if n != 1 { + return fmt.Errorf("single character option given more than one character: %q", s) + } + _r, _ := utf8.DecodeRuneInString(s) + *r = configRune(_r) + return nil +} diff --git a/x-pack/filebeat/input/azureblobstorage/decoding_csv.go b/x-pack/filebeat/input/azureblobstorage/decoding_csv.go new file mode 100644 index 000000000000..b28be94c5232 --- /dev/null +++ b/x-pack/filebeat/input/azureblobstorage/decoding_csv.go @@ -0,0 +1,139 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package azureblobstorage + +import ( + "bytes" + "encoding/csv" + "fmt" + "io" + "slices" +) + +// csvDecoder is a decoder for CSV data. +type csvDecoder struct { + r *csv.Reader + + header []string + current []string + coming []string + + err error +} + +// newCSVDecoder creates a new CSV decoder. 
+func newCSVDecoder(config decoderConfig, r io.Reader) (decoder, error) { + d := csvDecoder{r: csv.NewReader(r)} + d.r.ReuseRecord = true + if config.Codec.CSV.Comma != nil { + d.r.Comma = rune(*config.Codec.CSV.Comma) + } + d.r.Comment = rune(config.Codec.CSV.Comment) + d.r.LazyQuotes = config.Codec.CSV.LazyQuotes + d.r.TrimLeadingSpace = config.Codec.CSV.TrimLeadingSpace + if len(config.Codec.CSV.Fields) != 0 { + d.r.FieldsPerRecord = len(config.Codec.CSV.Fields) + d.header = config.Codec.CSV.Fields + } else { + h, err := d.r.Read() + if err != nil { + return nil, err + } + d.header = slices.Clone(h) + } + var err error + d.coming, err = d.r.Read() + if err != nil { + return nil, err + } + d.current = make([]string, 0, len(d.header)) + return &d, nil +} + +func (d *csvDecoder) more() bool { return len(d.coming) == len(d.header) } + +// next advances the decoder to the next data item and returns true if +// there is more data to be decoded. +func (d *csvDecoder) next() bool { + if !d.more() && d.err != nil { + return false + } + d.current = d.current[:len(d.header)] + copy(d.current, d.coming) + d.coming, d.err = d.r.Read() + if d.err == io.EOF { + d.coming = nil + } + return true +} + +// decode returns the JSON encoded value of the current CSV line. next must +// have been called before any calls to decode. +func (d *csvDecoder) decode() ([]byte, error) { + err := d.check() + if err != nil { + return nil, err + } + var buf bytes.Buffer + buf.WriteByte('{') + for i, n := range d.header { + if i != 0 { + buf.WriteByte(',') + } + buf.WriteByte('"') + buf.WriteString(n) + buf.WriteString(`":"`) + buf.WriteString(d.current[i]) + buf.WriteByte('"') + } + buf.WriteByte('}') + d.current = d.current[:0] + return buf.Bytes(), nil +} + +// decodeValue returns the value of the current CSV line interpreted as +// an object with fields based on the header held by the receiver. next must +// have been called before any calls to decode. 
+func (d *csvDecoder) decodeValue() ([]byte, map[string]any, error) { + err := d.check() + if err != nil { + return nil, nil, err + } + m := make(map[string]any, len(d.header)) + for i, n := range d.header { + m[n] = d.current[i] + } + d.current = d.current[:0] + b, err := d.decode() + if err != nil { + return nil, nil, err + } + return b, m, nil +} + +func (d *csvDecoder) check() error { + if d.err != nil { + if d.err == io.EOF && d.coming == nil { + return nil + } + return d.err + } + if len(d.current) == 0 { + return fmt.Errorf("decode called before next") + } + // By the time we are here, current must be the same + // length as header; if it was not read, it would be + // zero, but if it was, it must match by the contract + // of the csv.Reader. + return nil +} + +// close closes the csv decoder and releases the resources. +func (d *csvDecoder) close() error { + if d.err == io.EOF { + return nil + } + return d.err +} diff --git a/x-pack/filebeat/input/azureblobstorage/decoding_test.go b/x-pack/filebeat/input/azureblobstorage/decoding_test.go new file mode 100644 index 000000000000..e541fc742796 --- /dev/null +++ b/x-pack/filebeat/input/azureblobstorage/decoding_test.go @@ -0,0 +1,237 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package azureblobstorage + +import ( + "context" + "encoding/json" + "errors" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + azcontainer "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/beat" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" +) + +// all test files are read from the "testdata" directory +const testDataPath = "testdata" + +func TestDecoding(t *testing.T) { + logp.TestingSetup() + log := logp.L() + + testCases := []struct { + name string + file string + content string + contentType string + numEvents int + assertAgainst string + config decoderConfig + }{ + { + name: "gzip_csv", + file: "txn.csv.gz", + content: "text/csv", + numEvents: 4, + assertAgainst: "txn.json", + config: decoderConfig{ + Codec: &codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + Comma: ptr[configRune](' '), + }, + }, + }, + }, + { + name: "csv", + file: "txn.csv", + content: "text/csv", + numEvents: 4, + assertAgainst: "txn.json", + config: decoderConfig{ + Codec: &codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + Comma: ptr[configRune](' '), + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + file := filepath.Join(testDataPath, tc.file) + if tc.contentType == "" { + tc.contentType = "application/octet-stream" + } + f, err := os.Open(file) + if err != nil { + t.Fatalf("failed to open test data: %v", err) + } + defer f.Close() + p := &pub{t: t} + item := &azcontainer.BlobItem{ + Name: ptr("test_blob"), + Properties: &azcontainer.BlobProperties{ + ContentType: ptr(tc.content), + LastModified: &time.Time{}, + }, + } + j := newJob(&blob.Client{}, item, "https://foo.blob.core.windows.net/", newState(), &Source{}, p, log) + j.src.ReaderConfig.Decoding = tc.config + err = 
j.decode(context.Background(), f, "test") + if err != nil { + t.Errorf("unexpected error calling decode: %v", err) + } + + events := p.events + if tc.assertAgainst != "" { + targetData := readJSONFromFile(t, filepath.Join(testDataPath, tc.assertAgainst)) + assert.Equal(t, len(targetData), len(events)) + + for i, event := range events { + msg, err := event.Fields.GetValue("message") + assert.NoError(t, err) + assert.JSONEq(t, targetData[i], msg.(string)) + } + } + }) + } +} + +type pub struct { + t *testing.T + events []beat.Event +} + +func (p *pub) Publish(e beat.Event, _cursor interface{}) error { + p.t.Logf("%v\n", e.Fields) + p.events = append(p.events, e) + return nil +} + +// readJSONFromFile reads the json file and returns the data as a slice of strings +func readJSONFromFile(t *testing.T, filepath string) []string { + fileBytes, err := os.ReadFile(filepath) + assert.NoError(t, err) + var rawMessages []json.RawMessage + err = json.Unmarshal(fileBytes, &rawMessages) + assert.NoError(t, err) + var data []string + + for _, rawMsg := range rawMessages { + data = append(data, string(rawMsg)) + } + return data +} + +var codecConfigTests = []struct { + name string + yaml string + want decoderConfig + wantErr error +}{ + { + name: "handle_rune", + yaml: ` +codec: + csv: + enabled: true + comma: ' ' + comment: '#' +`, + want: decoderConfig{&codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + Comma: ptr[configRune](' '), + Comment: '#', + }, + }}, + }, + { + name: "no_comma", + yaml: ` +codec: + csv: + enabled: true +`, + want: decoderConfig{&codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + }, + }}, + }, + { + name: "null_comma", + yaml: ` +codec: + csv: + enabled: true + comma: "\u0000" +`, + want: decoderConfig{&codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + Comma: ptr[configRune]('\x00'), + }, + }}, + }, + { + name: "bad_rune", + yaml: ` +codec: + csv: + enabled: true + comma: 'this is too long' +`, + wantErr: errors.New(`single character option 
given more than one character: "this is too long" accessing 'codec.csv.comma'`), + }, +} + +func TestCodecConfig(t *testing.T) { + for _, test := range codecConfigTests { + t.Run(test.name, func(t *testing.T) { + c, err := conf.NewConfigWithYAML([]byte(test.yaml), "") + if err != nil { + t.Fatalf("unexpected error unmarshaling config: %v", err) + } + + var got decoderConfig + err = c.Unpack(&got) + if !sameError(err, test.wantErr) { + t.Errorf("unexpected error unpacking config: got:%v want:%v", err, test.wantErr) + } + + if !reflect.DeepEqual(got, test.want) { + t.Errorf("unexpected result\n--- want\n+++ got\n%s", cmp.Diff(test.want, got)) + } + }) + } +} + +func sameError(a, b error) bool { + switch { + case a == nil && b == nil: + return true + case a == nil, b == nil: + return false + default: + return a.Error() == b.Error() + } +} + +func ptr[T any](v T) *T { return &v } diff --git a/x-pack/filebeat/input/azureblobstorage/input.go b/x-pack/filebeat/input/azureblobstorage/input.go index 245daaef6e5b..7c7989ccbce3 100644 --- a/x-pack/filebeat/input/azureblobstorage/input.go +++ b/x-pack/filebeat/input/azureblobstorage/input.go @@ -71,6 +71,7 @@ func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { TimeStampEpoch: container.TimeStampEpoch, ExpandEventListFromField: container.ExpandEventListFromField, FileSelectors: container.FileSelectors, + ReaderConfig: container.ReaderConfig, }) } @@ -120,6 +121,7 @@ func tryOverrideOrDefault(cfg config, c container) container { if len(c.FileSelectors) == 0 && len(cfg.FileSelectors) != 0 { c.FileSelectors = cfg.FileSelectors } + c.ReaderConfig = cfg.ReaderConfig return c } diff --git a/x-pack/filebeat/input/azureblobstorage/input_stateless.go b/x-pack/filebeat/input/azureblobstorage/input_stateless.go index 73ae14a62e58..8bff050c1123 100644 --- a/x-pack/filebeat/input/azureblobstorage/input_stateless.go +++ b/x-pack/filebeat/input/azureblobstorage/input_stateless.go @@ -57,6 +57,7 @@ func (in *statelessInput) 
Run(inputCtx v2.Context, publisher stateless.Publisher TimeStampEpoch: container.TimeStampEpoch, ExpandEventListFromField: container.ExpandEventListFromField, FileSelectors: container.FileSelectors, + ReaderConfig: container.ReaderConfig, } st := newState() diff --git a/x-pack/filebeat/input/azureblobstorage/job.go b/x-pack/filebeat/input/azureblobstorage/job.go index f886f330af6a..ffc8fd04dcd4 100644 --- a/x-pack/filebeat/input/azureblobstorage/job.go +++ b/x-pack/filebeat/input/azureblobstorage/job.go @@ -134,20 +134,47 @@ func (j *job) processAndPublishData(ctx context.Context, id string) error { } }() - err = j.readJsonAndPublish(ctx, reader, id) - if err != nil { - return fmt.Errorf("failed to read data from blob with error: %w", err) - } - - return err + return j.decode(ctx, reader, id) } -func (j *job) readJsonAndPublish(ctx context.Context, r io.Reader, id string) error { +func (j *job) decode(ctx context.Context, r io.Reader, id string) error { r, err := j.addGzipDecoderIfNeeded(bufio.NewReader(r)) if err != nil { return fmt.Errorf("failed to add gzip decoder to blob: %s, with error: %w", *j.blob.Name, err) } + dec, err := newDecoder(j.src.ReaderConfig.Decoding, r) + if err != nil { + return err + } + var evtOffset int64 + switch dec := dec.(type) { + case decoder: + defer dec.close() + + for dec.next() { + msg, err := dec.decode() + if err != nil { + if err == io.EOF { + return nil + } + break + } + evt := j.createEvent(string(msg), evtOffset) + j.publish(evt, !dec.more(), id) + } + default: + err = j.readJsonAndPublish(ctx, r, id) + if err != nil { + return fmt.Errorf("failed to read data from blob with error: %w", err) + } + } + + return err +} + +func (j *job) readJsonAndPublish(ctx context.Context, r io.Reader, id string) error { + var err error // checks if the root element is an array or not r, j.isRootArray, err = evaluateJSON(bufio.NewReader(r)) if err != nil { @@ -183,22 +210,25 @@ func (j *job) readJsonAndPublish(ctx context.Context, r 
io.Reader, id string) er return err } evt := j.createEvent(string(data), offset) + j.publish(evt, !dec.More(), id) + } + return nil +} - if !dec.More() { - // if this is the last object, then perform a complete state save - cp, done := j.state.saveForTx(*j.blob.Name, *j.blob.Properties.LastModified) - if err := j.publisher.Publish(evt, cp); err != nil { - j.log.Errorf(jobErrString, id, err) - } - done() - } else { - // since we don't update the cursor checkpoint, lack of a lock here should be fine - if err := j.publisher.Publish(evt, nil); err != nil { - j.log.Errorf(jobErrString, id, err) - } +func (j *job) publish(evt beat.Event, last bool, id string) { + if last { + // if this is the last object, then perform a complete state save + cp, done := j.state.saveForTx(*j.blob.Name, *j.blob.Properties.LastModified) + if err := j.publisher.Publish(evt, cp); err != nil { + j.log.Errorf(jobErrString, id, err) } + done() + return + } + // since we don't update the cursor checkpoint, lack of a lock here should be fine + if err := j.publisher.Publish(evt, nil); err != nil { + j.log.Errorf(jobErrString, id, err) } - return nil } // splitEventList splits the event list into individual events and publishes them diff --git a/x-pack/filebeat/input/azureblobstorage/testdata/txn.csv b/x-pack/filebeat/input/azureblobstorage/testdata/txn.csv new file mode 100644 index 000000000000..80ca65df21ef --- /dev/null +++ b/x-pack/filebeat/input/azureblobstorage/testdata/txn.csv @@ -0,0 +1,5 @@ +date time time-taken cs-bytes sc-bytes bytes c-ip s-ip cs-username cs-method cs-uri-scheme cs-uri-query cs-user-agent cs-content-type sc-status sc-content-type cs-dns cs-host cs-uri cs-uri-port cs-referer x-cs-session-id x-cs-access-method x-cs-app x-s-country x-s-latitude x-s-longitude x-s-location x-s-region x-s-zipcode x-c-country x-c-latitude x-c-longitude x-c-location x-c-region x-c-zipcode x-c-os x-c-browser x-c-browser-version x-c-device x-cs-site x-cs-timestamp x-cs-page-id x-cs-userip 
x-cs-traffic-type x-cs-tunnel-id x-category x-other-category x-type x-server-ssl-err x-client-ssl-err x-transaction-id x-request-id x-cs-sni x-cs-domain-fronted-sni x-category-id x-other-category-id x-sr-headers-name x-sr-headers-value x-cs-ssl-ja3 x-sr-ssl-ja3s x-ssl-bypass x-ssl-bypass-reason x-r-cert-subject-cn x-r-cert-issuer-cn x-r-cert-startdate x-r-cert-enddate x-r-cert-valid x-r-cert-expired x-r-cert-untrusted-root x-r-cert-incomplete-chain x-r-cert-self-signed x-r-cert-revoked x-r-cert-revocation-check x-r-cert-mismatch x-cs-ssl-fronting-error x-cs-ssl-handshake-error x-sr-ssl-handshake-error x-sr-ssl-client-certificate-error x-sr-ssl-malformed-ssl x-s-custom-signing-ca-error x-cs-ssl-engine-action x-cs-ssl-engine-action-reason x-sr-ssl-engine-action x-sr-ssl-engine-action-reason x-ssl-policy-src-ip x-ssl-policy-dst-ip x-ssl-policy-dst-host x-ssl-policy-dst-host-source x-ssl-policy-categories x-ssl-policy-action x-ssl-policy-name x-cs-ssl-version x-cs-ssl-cipher x-sr-ssl-version x-sr-ssl-cipher x-cs-src-ip-egress x-s-dp-name x-cs-src-ip x-cs-src-port x-cs-dst-ip x-cs-dst-port x-sr-src-ip x-sr-src-port x-sr-dst-ip x-sr-dst-port x-cs-ip-connect-xff x-cs-ip-xff x-cs-connect-host x-cs-connect-port x-cs-connect-user-agent x-cs-url x-cs-uri-path x-cs-http-version rs-status x-cs-app-category x-cs-app-cci x-cs-app-ccl x-cs-app-tags x-cs-app-suite x-cs-app-instance-id x-cs-app-instance-name x-cs-app-instance-tag x-cs-app-activity x-cs-app-from-user x-cs-app-to-user x-cs-app-object-type x-cs-app-object-name x-cs-app-object-id x-rs-file-type x-rs-file-category x-rs-file-language x-rs-file-size x-rs-file-md5 x-rs-file-sha256 x-error x-c-local-time x-policy-action x-policy-name x-policy-src-ip x-policy-dst-ip x-policy-dst-host x-policy-dst-host-source x-policy-justification-type x-policy-justification-reason x-sc-notification-name +2024-08-05 16:24:20 64 2971 2050 5021 10.5.78.159 204.79.197.237 "vikash.ranjan@riverbed.com" GET https cc=US&setlang=en-US "Mozilla/5.0 
(Windows NT 10.0; Win64; x64; Cortana 1.14.7.19041; 10.0.0.0.19045.2006) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19045" - 200 "application/json; charset=utf-8" www.bing.com www.bing.com /client/config?cc=US&setlang=en-US 443 - 3683772769278232507 "Client" "Microsoft Bing" "US" 47.682899 -122.120903 "Redmond" "Washington" "N/A" "US" 29.775400 -95.598000 "Houston" "Texas" "77079" "Windows 10" "Edge" "18.19045" "Windows Device" "bing" 1722875060 5762388460300455936 10.5.78.159 CloudApp - "Search Engines" - http_transaction - - 2696581500064586450 2901306739654139904 www.bing.com - 551 - - - 28a2c9bd18a11de089ef85a160da29e4 NotAvailable No - "NotChecked" "NotChecked" "NotChecked" "NotChecked" NotChecked NotChecked NotChecked NotChecked NotChecked NotChecked "NotChecked" NotChecked No No NotChecked NotChecked NotChecked No Allow "Established" None "NotEstablished" 10.5.78.159 69.192.139.97 www.bing.com Sni "Search Engines" Decrypt - TLSv1.2 ECDHE-RSA-AES256-GCM-SHA384 NotChecked NotChecked 208.185.23.18 "US-ATL2" 10.5.78.159 25941 69.192.139.97 443 - - 10.144.54.201 842 - - - - - https://www.bing.com/client/config?cc=US&setlang=en-US /client/config HTTP1.1 200 "Search Engines" 58 low "Consumer,Unsanctioned" - - - - "Browse" - - - - - - - - - - - - "2024-08-05 11:24:00" "allow" "NetskopeAllow" 10.5.78.159 204.79.197.237 www.bing.com HttpHostHeader - - - +2024-08-05 16:24:19 - 18 0 18 10.70.0.19 - "nadav@skyformation.onmicrosoft.com" PRI - - - - - - - us-west1-b-osconfig.googleapis.com * 443 - 0 "Client" - - - - - - - "US" 45.605600 -121.180700 "The Dalles" "Oregon" "97058" - - - - - 1722875059 0 10.70.0.19 - - "Technology" "Cloud Storage" http_transaction - - 2035489204758272484 0 us-west1-b-osconfig.googleapis.com - 564 "7" - - 7a15285d4efc355608b304698cd7f9ab NotAvailable No - "NotChecked" "NotChecked" "NotChecked" "NotChecked" NotChecked NotChecked NotChecked NotChecked NotChecked NotChecked "NotChecked" NotChecked No No 
NotChecked NotChecked NotChecked No Allow "Established" None "NotEstablished" 10.70.0.19 142.250.99.95 us-west1-b-osconfig.googleapis.com Sni "Technology, Cloud Storage" Decrypt - TLSv1.3 TLS_AES_256_GCM_SHA384 NotChecked NotChecked 34.82.190.203 "US-SEA2" 10.70.0.19 32951 142.250.99.95 443 - - - - - - - - - - - HTTP1.1 - - - - - - - - - - - - - - - - - - - - - http-malformed "NotChecked" NotChecked - - - - - - - - +2024-08-05 16:24:20 - 0 0 0 10.0.20.111 - "levente.fangli@cososys.com" - - - - - - - - achecker-alliances.eu.goskope.com - 443 - 0 "Client" - - - - - - - "RO" 46.765700 23.594300 "Cluj-Napoca" "Cluj County" "400027" - - - - - 1722875060 0 10.0.20.111 - - - - http_transaction - "HsFailure (error:14094418:SSL routines:ssl3_read_bytes:tlsv1 alert unknown ca)" 1350739992944030464 0 achecker-alliances.eu.goskope.com - - - - - bc29aa426fc99c0be1b9be941869f88a NotAvailable No - "NotChecked" "NotChecked" "NotChecked" "NotChecked" NotChecked NotChecked NotChecked NotChecked NotChecked NotChecked "NotChecked" NotChecked No Yes NotChecked NotChecked NotChecked No Block "SSL Error - SSL Handshake Error" None "NotEstablished" - - - Unknown - Decrypt - - - NotChecked NotChecked 81.196.156.53 "AT-VIE1" 10.0.20.111 57897 31.186.239.94 443 - - - - - - - - - - - UNKNOWN - - - - - - - - - - - - - - - - - - - - - client-ssl "NotChecked" NotChecked - - - - - - - - +2024-08-05 16:24:23 - 0 0 0 10.0.20.111 - "levente.fangli@cososys.com" - - - - - - - - achecker-alliances.eu.goskope.com - 443 - 0 "Client" - - - - - - - "RO" 46.765700 23.594300 "Cluj-Napoca" "Cluj County" "400027" - - - - - 1722875063 0 10.0.20.111 - - - - http_transaction - "HsFailure (error:14094418:SSL routines:ssl3_read_bytes:tlsv1 alert unknown ca)" 1615432978285898071 0 achecker-alliances.eu.goskope.com - - - - - bc29aa426fc99c0be1b9be941869f88a NotAvailable No - "NotChecked" "NotChecked" "NotChecked" "NotChecked" NotChecked NotChecked NotChecked NotChecked NotChecked NotChecked "NotChecked" NotChecked No 
Yes NotChecked NotChecked NotChecked No Block "SSL Error - SSL Handshake Error" None "NotEstablished" - - - Unknown - Decrypt - - - NotChecked NotChecked 81.196.156.53 "AT-VIE1" 10.0.20.111 57897 31.186.239.94 443 - - - - - - - - - - - UNKNOWN - - - - - - - - - - - - - - - - - - - - - client-ssl "NotChecked" NotChecked - - - - - - - - diff --git a/x-pack/filebeat/input/azureblobstorage/testdata/txn.csv.gz b/x-pack/filebeat/input/azureblobstorage/testdata/txn.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..52e8fb20539a40e3127997a8877e3346ea1c6dfc GIT binary patch literal 2527 zcmV<52_W_#iwFP!00000|8!Mbn(H0bbhgbUzHnwq|6>2XLC4)%VlQk~5hX z8-672pb3p{=C<+y!!HguQuufp>rJT=J&Y^wJt84{jZSZ)s>1!Ywi1GuU1 zNW^-&SfG8sJs|mT1e0f`J({y);=;tRj2xl47&0TgH1dzzDdsaYsG?C7T~yJiin=Pg zsEV3@9}@r55>%t%&Z6zg#CJzrV|G@lR7Ex1_QLMC@R^BJ09OSjIM0qsk_&Q#z`h}5Vg2ZTpxj=E-JsSh9=tisTXHd6SuP)h~ZfLajROK?bu%t3Z>G+Ca(iOk>6x4M7D5x!w?6}#b;=P{O(9cn} zRCM)aCJ(i^<%p6Ggs*OJSX^Kzjr9ZEOIYo^UU~-)m(;@^y`v|na2amuach}27@z-vD9l_V}`#yRRBdnDHW2YPaiJ6tYDfd9TlNM^3p&sZOZ`R-AEh#wip6xKRu^l@ui93>YPn zC{36&xGRVhPGr)pQQ)aoh}0*B5uyi@}YZ z^{Wf#)4K|ioA7trbvBcP!pr;CH(@ux?L3B~=YSi9IEPR1H3|J_y#+=XM#tb(u=Bm% z{)w_AoYLYIl$%ZG?%m>TJ7zLhBro9Q+u8i)`ZaXzgM%wqKZI8>S@+@5Wh$EIGRN8| zhsrK(-+nfmHY=Ageej+@iltLXEc$P2JGQ}RABW)2psJwi zQz5In*7-53gI~6*KmH37A#x0q7dlsptHN-ldCnz8peK9d1Yv9IJ`77E9YaWAdQ0X37LiI6t6mGNf*^V2cloVK5Y9x_H(^%vzJPilU zx%*@XT$Q5AL^sbgF^WP&2$wGy{d;`rwrw5H7W)*L;#@093v4J=@LX$A(43-;G>G(Z ze@(jZ)F6onLsB{0*K3&iRqNdlDTv+lVZ>$|Y#k}UE###zh15)9>V=RR{}k(@ET@Ia z@kp@TV1B1$q#z~Ro1+75d2JRA)0VNuQEl8(OUnw{SZpjXcKwa^vt2Hm*F# z|8rBHur@;gW?>j-&Ni+;g!=ure*h9uW=ReP001A02mk;800003ol{MZ+aM6VSL%Nl zajXgg3^wekRyOKZ>c^^f&la)qht@HXVYAA=FHR24M%gy1#P7RhgVRI`r8yOuR6OHCi3odxR17i|fU*;{%rME7Y=mv=L`H3DlTeXr zRX16y%?7@^knhbfn$2kAV&NJ~tKQa6wn4Wp{^b9gz72PJ{)q(nIJy@=YagPP!S^Si z$sVC!RnLaQ1SeclqBYU-mg_C1;e47O;bNHgww@2W5bv@4TI?A~mT#4>i;By%`n>v9 z6%Tn5hk0E`B8cLV7$wmFiz{)l&iO0K3D=Tc5I?#5T0E5HW3=)cyVqi|Bi@>E`3L_p 
zn2Wae?|uVN)}`m^0{{RYiwFP!000001MO2yZxb;Py(94-ET7=8mcQ0(=R^rXMW9ro z6vQd=I!?2!v7@y&DF43RO%SwAqzDd3sG7^nc>LbXXdcCs7g!d!1Vl_IpHK`6fJY-3 zVMH+^gybA_c58jIXr=wFYhIZUL)=AV!grXT@0&JjYg*eC+POx1V?Ifhti}7z4^ssW`)9egCbP>rF5k?jtY_y-&Lwaf~soe#ase zrLroqcs$bH)+ucksnb4clbYZM?#uXAH?Hq2cwxIPbQ8j{;+)9I_4NnPg+4Xj#z~Bh zEjp{0i+Y!GoLt-zpq=d!=zZ(M4-brf2?1dxu9#AaW)#dY7d&@!&X#^!ZK%?kQ?W8i z8C+XZtJciWLUy;gJ1rix~Jj5R~yfP0#Er7$S zXT$dMEQ5ck{W7j~Yxmg!-P@6G7E>O(iR*!LzptCa`U~*Jnr^pAhx~C;Nf1#2ky4 Date: Tue, 8 Oct 2024 10:56:24 +0530 Subject: [PATCH 10/90] [filebeat][GCS] - Fixed failed job handling and removed false-positive error logs (#41142) [filebeat][GCS] fix failed job handling and remove false-positive error logs In cases where GCS report an error, we were logging the error but not otherwise making use of the information. When GCS was reporting that a requested object does note exist this was causing unnecessary log-spam and not updating the set of failed jobs to remove the object, resulting in future futile re-attempts to collect the object. When any other error was reported, the set of failed jobs was not being updated, resulting in continued re-attempts, even past the maximum retry count. This fixes both cases by differentiating the two situations, logging only at debug level when the object is reported to be missing and removing the object from the failed rework set, and logging at error and increasing the failure count for all other error cases. --- CHANGELOG.next.asciidoc | 2 +- x-pack/filebeat/input/gcs/scheduler.go | 12 +++++++++++- x-pack/filebeat/input/gcs/state.go | 8 ++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b36bf002ffe1..395dd313dec2 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -172,7 +172,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix long filepaths in diagnostics exceeding max path limits on Windows. 
{pull}40909[40909] - Add backup and delete for AWS S3 polling mode feature back. {pull}41071[41071] - Fix a bug in Salesforce input to only handle responses with 200 status code {pull}41015[41015] - +- Fixed failed job handling and removed false-positive error logs in the GCS input. {pull}41142[41142] *Heartbeat* diff --git a/x-pack/filebeat/input/gcs/scheduler.go b/x-pack/filebeat/input/gcs/scheduler.go index 45e7585c14ef..ef1bebd083d9 100644 --- a/x-pack/filebeat/input/gcs/scheduler.go +++ b/x-pack/filebeat/input/gcs/scheduler.go @@ -6,6 +6,7 @@ package gcs import ( "context" + "errors" "fmt" "slices" "sort" @@ -212,7 +213,16 @@ func (s *scheduler) addFailedJobs(ctx context.Context, jobs []*job) []*job { if !jobMap[name] { obj, err := s.bucket.Object(name).Attrs(ctx) if err != nil { - s.log.Errorf("adding failed job %s to job list caused an error: %v", name, err) + if errors.Is(err, storage.ErrObjectNotExist) { + // if the object is not found in the bucket, then remove it from the failed job list + s.state.deleteFailedJob(name) + s.log.Debugf("scheduler: failed job %s not found in bucket %s", name, s.src.BucketName) + } else { + // if there is an error while validating the object, + // then update the failed job retry count and work towards natural removal + s.state.updateFailedJobs(name) + s.log.Errorf("scheduler: adding failed job %s to job list caused an error: %v", name, err) + } continue } diff --git a/x-pack/filebeat/input/gcs/state.go b/x-pack/filebeat/input/gcs/state.go index 59f79fce471c..ea04edcae908 100644 --- a/x-pack/filebeat/input/gcs/state.go +++ b/x-pack/filebeat/input/gcs/state.go @@ -79,6 +79,14 @@ func (s *state) updateFailedJobs(jobName string) { s.mu.Unlock() } +// deleteFailedJob, deletes a failed job from the failedJobs map +// this is used when a job no longer exists in the bucket or gets expired +func (s *state) deleteFailedJob(jobName string) { + s.mu.Lock() + delete(s.cp.FailedJobs, jobName) + s.mu.Unlock() +} + // setCheckpoint, sets 
checkpoint from source to current state instance // If for some reason the current state is empty, assigns new states as // a fail safe mechanism From 4c1489d67c3f6e0903bcfe6fb05cbdc5ab54d979 Mon Sep 17 00:00:00 2001 From: Vinit Chauhan Date: Tue, 8 Oct 2024 02:58:38 -0400 Subject: [PATCH 11/90] Update Ubuntu 20.04 with 24.04 for Docker base images (#40942) * Replace Ubuntu 20.04 with 22.04 for Docker base images * Added link to pull request in CHANGELOG.next.asciidoc * update to ubuntu:24.04 Co-authored-by: Julien Lind * remove default ubuntu user in 24.04 while generating container. --------- Co-authored-by: Mauri de Souza Meneguzzo Co-authored-by: Julien Lind Co-authored-by: Andrzej Stencel --- CHANGELOG.next.asciidoc | 2 ++ dev-tools/packaging/packages.yml | 4 ++-- dev-tools/packaging/templates/docker/Dockerfile.tmpl | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 395dd313dec2..22f9c0701bdb 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -233,6 +233,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - When running under Elastic-Agent Kafka output allows dynamic topic in `topic` field {pull}40415[40415] - The script processor has a new configuration option that only uses the cached javascript sessions and prevents the creation of new javascript sessions. - Update to Go 1.22.7. 
{pull}41018[41018] +- Replace Ubuntu 20.04 with 24.04 for Docker base images {issue}40743[40743] {pull}40942[40942] + *Auditbeat* diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index 8c22acc9dba6..1391368cf0bd 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -159,7 +159,7 @@ shared: - &docker_spec <<: *binary_spec extra_vars: - from: '--platform=linux/amd64 ubuntu:20.04' + from: '--platform=linux/amd64 ubuntu:24.04' buildFrom: '--platform=linux/amd64 cgr.dev/chainguard/wolfi-base' user: '{{ .BeatName }}' linux_capabilities: '' @@ -172,7 +172,7 @@ shared: - &docker_arm_spec <<: *docker_spec extra_vars: - from: '--platform=linux/arm64 ubuntu:20.04' + from: '--platform=linux/arm64 ubuntu:24.04' buildFrom: '--platform=linux/arm64 cgr.dev/chainguard/wolfi-base' - &docker_ubi_spec diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl index f8848640079a..85904ffe5dd2 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl @@ -57,6 +57,8 @@ RUN for iter in {1..10}; do \ {{- end }} {{- if contains .from "ubuntu" }} +RUN touch /var/mail/ubuntu && chown ubuntu /var/mail/ubuntu && userdel -r ubuntu + RUN for iter in {1..10}; do \ apt-get update -y && \ DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl gawk libcap2-bin xz-utils && \ From dc94114a914017843fb8d4eaa76de3b61a6787cd Mon Sep 17 00:00:00 2001 From: VihasMakwana <121151420+VihasMakwana@users.noreply.github.com> Date: Tue, 8 Oct 2024 13:00:10 +0530 Subject: [PATCH 12/90] [system/process]: mark module as healthy if metrics are partially filled for single process as well. 
(#40924) * fix: mark module as healthy if metrics are partially filled for single processes * chore: fix lint * chore: wrap the error and return * chore: fix lint --------- Co-authored-by: subham sarkar --- metricbeat/module/system/process/process.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index 01c8480656df..f84c0b6027a0 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -57,7 +57,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } - sys := base.Module().(resolve.Resolver) + sys, ok := base.Module().(resolve.Resolver) + if !ok { + return nil, fmt.Errorf("resolver cannot be cast from the module") + } enableCgroups := false if runtime.GOOS == "linux" { if config.Cgroups == nil || *config.Cgroups { @@ -131,14 +134,17 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { return err } else { proc, root, err := m.stats.GetOneRootEvent(m.setpid) - if err != nil { + if err != nil && !errors.Is(err, process.NonFatalErr{}) { + // return only if the error is fatal in nature return fmt.Errorf("error fetching pid %d: %w", m.setpid, err) + } else if (err != nil && errors.Is(err, process.NonFatalErr{})) { + err = mb.PartialMetricsError{Err: err} } + // if error is non-fatal, emit partial metrics. 
r.Event(mb.Event{ MetricSetFields: proc, RootFields: root, }) + return err } - - return nil } From bc242aeb6c023d0e8c259c97a88b68d7f61daac3 Mon Sep 17 00:00:00 2001 From: ShourieG <105607378+ShourieG@users.noreply.github.com> Date: Tue, 8 Oct 2024 18:55:43 +0530 Subject: [PATCH 13/90] [filebeat][GCS] - Improved documentation (#41143) --- CHANGELOG.next.asciidoc | 1 + .../filebeat/docs/inputs/input-gcs.asciidoc | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 22f9c0701bdb..140b5c061cdd 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -319,6 +319,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add support to CEL for reading host environment variables. {issue}40762[40762] {pull}40779[40779] - Add CSV decoder to awss3 input. {pull}40896[40896] - Change request trace logging to include headers instead of complete request. {pull}41072[41072] +- Improved GCS input documentation. {pull}41143[41143] - Add CSV decoding capacity to azureblobstorage input {pull}40978[40978] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-gcs.asciidoc b/x-pack/filebeat/docs/inputs/input-gcs.asciidoc index b73e6ca5232c..ef2db8c1f05c 100644 --- a/x-pack/filebeat/docs/inputs/input-gcs.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-gcs.asciidoc @@ -213,17 +213,16 @@ This is a specific subfield of a bucket. It specifies the bucket name. This attribute defines the maximum amount of time after which a bucket operation will give and stop if no response is recieved (example: reading a file / listing a file). It can be defined in the following formats : `{{x}}s`, `{{x}}m`, `{{x}}h`, here `s = seconds`, `m = minutes` and `h = hours`. The value `{{x}}` can be anything we wish. -If no value is specified for this, by default its initialized to `50 seconds`. 
This attribute can be specified both at the root level of the configuration as well at the bucket level. -The bucket level values will always take priority and override the root level values if both are specified. +If no value is specified for this, by default its initialized to `50 seconds`. This attribute can be specified both at the root level of the configuration as well at the bucket level. The bucket level values will always take priority and override the root level values if both are specified. The value of `bucket_timeout` that should be used depends on the size of the files and the network speed. If the timeout is too low, the input will not be able to read the file completely and `context_deadline_exceeded` errors will be seen in the logs. If the timeout is too high, the input will wait for a long time for the file to be read, which can cause the input to be slow. The ratio between the `bucket_timeout` and `poll_interval` should be considered while setting both the values. A low `poll_interval` and a very high `bucket_timeout` can cause resource utilization issues as schedule ops will be spawned every poll iteration. If previous poll ops are still running, this could result in concurrently running ops and so could cause a bottleneck over time. [id="attrib-max_workers-gcs"] [float] ==== `max_workers` -This attribute defines the maximum number of workers (go routines / lightweight threads) are allocated in the worker pool (thread pool) for processing jobs -which read contents of file. More number of workers equals a greater amount of concurrency achieved. There is an upper cap of `5000` workers per bucket that -can be defined due to internal sdk constraints. This attribute can be specified both at the root level of the configuration as well at the bucket level. -The bucket level values will always take priority and override the root level values if both are specified. 
+This attribute defines the maximum number of workers (goroutines / lightweight threads) are allocated in the worker pool (thread pool) for processing jobs which read the contents of files. This attribute can be specified both at the root level of the configuration and at the bucket level. Bucket level values override the root level values if both are specified. Larger number of workers do not necessarily improve of throughput, and this should be carefully tuned based on the number of files, the size of the files being processed and resources available. Increasing `max_workers` to very high values may cause resource utilization problems and can lead to a bottleneck in processing. Usually a maximum cap of `2000` workers is recommended. A very low `max_worker` count will drastically increase the number of network calls required to fetch the objects, which can cause a bottleneck in processing. + +NOTE: The value of `max_workers` is tied to the `batch_size` currently to ensure even distribution of workloads across all goroutines. This ensures that the input is able to process the files in an efficient manner. This `batch_size` determines how many objects will be fetched in one single call. The `max_workers` value should be set based on the number of files to be read, the resources available and the network speed. For example,`max_workers=3` would mean that every pagination request a total number of `3` gcs objects are fetched and distributed among `3 goroutines`, `max_workers=100` would mean `100` gcs objects are fetched in every pagination request and distributed among `100 goroutines`. + [id="attrib-poll-gcs"] [float] @@ -241,7 +240,9 @@ This attribute defines the maximum amount of time after which the internal sched defined in the following formats : `{{x}}s`, `{{x}}m`, `{{x}}h`, here `s = seconds`, `m = minutes` and `h = hours`. The value `{{x}}` can be anything we wish. Example : `10s` would mean we would like the polling to occur every 10 seconds. 
If no value is specified for this, by default its initialized to `300 seconds`. This attribute can be specified both at the root level of the configuration as well at the bucket level. The bucket level values will always take priority -and override the root level values if both are specified. +and override the root level values if both are specified. The `poll_interval` should be set to a value that is equal to the `bucket_timeout` value. This would ensure that another schedule operation is not started before the current buckets have all been processed. If the `poll_interval` is set to a value that is less than the `bucket_timeout`, then the input will start another schedule operation before the current one has finished, which can cause a bottleneck over time. Having a lower `poll_interval` can make the input faster at the cost of more resource utilization. + +NOTE: Some edge case scenarios could require different values for `poll_interval` and `bucket_timeout`. For example, if the files are very large and the network speed is slow, then the `bucket_timeout` value should be set to a higher value than the `poll_interval`. This would ensure that polling operation does not wait too long for the files to be read and moves to the next iteration while the current one is still being processed. This would ensure a higher throughput and better resource utilization. [id="attrib-parse_json"] [float] @@ -276,6 +277,8 @@ filebeat.inputs: - regex: '/Security-Logs/' ---- +The `file_selectors` operation is performed within the agent locally, hence using this option will cause the agent to download all the files and then filter them. This can cause a bottleneck in processing if the number of files is very high. It is recommended to use this attribute only when the number of files is limited or ample resources are available. 
+ [id="attrib-expand_event_list_from_field-gcs"] [float] ==== `expand_event_list_from_field` @@ -341,6 +344,8 @@ filebeat.inputs: timestamp_epoch: 1630444800 ---- +The GCS APIs don't provide a direct way to filter files based on the timestamp, so the input will download all the files and then filter them based on the timestamp. This can cause a bottleneck in processing if the number of files are very high. It is recommended to use this attribute only when the number of files are limited or ample resources are available. This option scales vertically and not horizontally. + [id="bucket-overrides"] *The sample configs below will explain the bucket level overriding of attributes a bit further :-* From 36327a4b2841f20e4685c308469df356f04415a6 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Tue, 8 Oct 2024 08:43:23 -0600 Subject: [PATCH 14/90] [AWS] Use namespace for GetListMetrics when exists (#41022) * Use namespace for GetListMetrics when exists --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/modules/aws.asciidoc | 3 ++- .../metricbeat/module/aws/_meta/docs.asciidoc | 3 ++- .../module/aws/cloudwatch/cloudwatch.go | 20 +++++++++++++++---- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 140b5c061cdd..7b0ced75159b 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -202,6 +202,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Remove excessive info-level logs in cgroups setup {pull}40491[40491] - Add missing ECS Cloud fields in GCP `metrics` metricset when using `exclude_labels: true` {issue}40437[40437] {pull}40467[40467] - Add AWS OwningAccount support for cross account monitoring {issue}40570[40570] {pull}40691[40691] +- Use namespace for GetListMetrics when exists in AWS {pull}41022[41022] - Fix http server helper SSL config. 
{pull}39405[39405] *Osquerybeat* diff --git a/metricbeat/docs/modules/aws.asciidoc b/metricbeat/docs/modules/aws.asciidoc index 20b6854a834e..0ee7f601052f 100644 --- a/metricbeat/docs/modules/aws.asciidoc +++ b/metricbeat/docs/modules/aws.asciidoc @@ -329,7 +329,8 @@ GetMetricData max page size: 100, based on https://docs.aws.amazon.com/AmazonClo | IAM ListAccountAliases | 1 | Once on startup | STS GetCallerIdentity | 1 | Once on startup | EC2 DescribeRegions| 1 | Once on startup -| CloudWatch ListMetrics | Total number of results / ListMetrics max page size | Per region per collection period +| CloudWatch ListMetrics without specifying namespace in configuration | Total number of results / ListMetrics max page size | Per region per collection period +| CloudWatch ListMetrics with specific namespaces in configuration | Total number of results / ListMetrics max page size * number of unique namespaces | Per region per collection period | CloudWatch GetMetricData | Total number of results / GetMetricData max page size | Per region per namespace per collection period |=== `billing`, `ebs`, `elb`, `sns`, `usage` and `lambda` are the same as `cloudwatch` metricset. 
diff --git a/x-pack/metricbeat/module/aws/_meta/docs.asciidoc b/x-pack/metricbeat/module/aws/_meta/docs.asciidoc index 223f90e0e24b..0b224bf5630c 100644 --- a/x-pack/metricbeat/module/aws/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/aws/_meta/docs.asciidoc @@ -317,7 +317,8 @@ GetMetricData max page size: 100, based on https://docs.aws.amazon.com/AmazonClo | IAM ListAccountAliases | 1 | Once on startup | STS GetCallerIdentity | 1 | Once on startup | EC2 DescribeRegions| 1 | Once on startup -| CloudWatch ListMetrics | Total number of results / ListMetrics max page size | Per region per collection period +| CloudWatch ListMetrics without specifying namespace in configuration | Total number of results / ListMetrics max page size | Per region per collection period +| CloudWatch ListMetrics with specific namespaces in configuration | Total number of results / ListMetrics max page size * number of unique namespaces | Per region per collection period | CloudWatch GetMetricData | Total number of results / GetMetricData max page size | Per region per namespace per collection period |=== `billing`, `ebs`, `elb`, `sns`, `usage` and `lambda` are the same as `cloudwatch` metricset. 
diff --git a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go index dde2463ea852..ed043e8c38f1 100644 --- a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go @@ -179,10 +179,22 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { continue } - // retrieve all the details for all the metrics available in the current region - listMetricsOutput, err := aws.GetListMetricsOutput("*", regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, svcCloudwatch) - if err != nil { - m.Logger().Errorf("Error while retrieving the list of metrics for region %s: %w", regionName, err) + // retrieve all the details for all the metrics available in the current region when no namespace is specified + // otherwise only retrieve metrics from the specific namespaces from the config + var listMetricsOutput []aws.MetricWithID + if len(namespaceDetailTotal) == 0 { + listMetricsOutput, err = aws.GetListMetricsOutput("*", regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, svcCloudwatch) + if err != nil { + m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, "*", err) + } + } else { + for namespace := range namespaceDetailTotal { + listMetricsOutputPerNamespace, err := aws.GetListMetricsOutput(namespace, regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, svcCloudwatch) + if err != nil { + m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, namespace, err) + } + listMetricsOutput = append(listMetricsOutput, listMetricsOutputPerNamespace...) 
+ } } if len(listMetricsOutput) == 0 { From 6339a4292da1bfcf5a33c8593b41556efbbdad86 Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Tue, 8 Oct 2024 13:54:53 -0300 Subject: [PATCH 15/90] chore: update ksm to v2.12.0 (#41051) We have test files for ksm v2.12.0 but the module manifest still requires v2.7.0. --- metricbeat/module/kubernetes/kubernetes.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/kubernetes.yml b/metricbeat/module/kubernetes/kubernetes.yml index 1ce2c581bc92..7ce36d136dfc 100644 --- a/metricbeat/module/kubernetes/kubernetes.yml +++ b/metricbeat/module/kubernetes/kubernetes.yml @@ -43,7 +43,7 @@ spec: spec: containers: - name: kube-state-metrics - image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.7.0 + image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.12.0 livenessProbe: httpGet: path: /healthz From 4be27b6ec3332b97557d0d7415a26528075c846d Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Wed, 9 Oct 2024 10:08:11 +1030 Subject: [PATCH 16/90] x-pack/filebeat/input/gcs: add support for CSV decoding (#40979) The test file txn.csv.gz was obtained from https://netskopepartnerlogfilebucket.s3.amazonaws.com/txn-1722875066329034-fe10b6a23cc643c4b282e6190de2352d.csv.gz --- CHANGELOG.next.asciidoc | 1 + .../filebeat/docs/inputs/input-gcs.asciidoc | 57 +- x-pack/filebeat/input/gcs/config.go | 10 + x-pack/filebeat/input/gcs/decoding.go | 47 ++ x-pack/filebeat/input/gcs/decoding_config.go | 54 ++ x-pack/filebeat/input/gcs/decoding_csv.go | 139 ++++ x-pack/filebeat/input/gcs/decoding_test.go | 225 +++++++ x-pack/filebeat/input/gcs/input.go | 2 + x-pack/filebeat/input/gcs/input_stateless.go | 1 + x-pack/filebeat/input/gcs/job.go | 109 +++- x-pack/filebeat/input/gcs/testdata/txn.csv | 5 + x-pack/filebeat/input/gcs/testdata/txn.csv.gz | Bin 0 -> 2527 bytes x-pack/filebeat/input/gcs/testdata/txn.json | 594 ++++++++++++++++++ x-pack/filebeat/input/gcs/types.go | 1 + 14 files 
changed, 1224 insertions(+), 21 deletions(-) create mode 100644 x-pack/filebeat/input/gcs/decoding.go create mode 100644 x-pack/filebeat/input/gcs/decoding_config.go create mode 100644 x-pack/filebeat/input/gcs/decoding_csv.go create mode 100644 x-pack/filebeat/input/gcs/decoding_test.go create mode 100644 x-pack/filebeat/input/gcs/testdata/txn.csv create mode 100644 x-pack/filebeat/input/gcs/testdata/txn.csv.gz create mode 100644 x-pack/filebeat/input/gcs/testdata/txn.json diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 7b0ced75159b..edc156862980 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -322,6 +322,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Change request trace logging to include headers instead of complete request. {pull}41072[41072] - Improved GCS input documentation. {pull}41143[41143] - Add CSV decoding capacity to azureblobstorage input {pull}40978[40978] +- Add CSV decoding capacity to gcs input {pull}40979[40979] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-gcs.asciidoc b/x-pack/filebeat/docs/inputs/input-gcs.asciidoc index ef2db8c1f05c..eae7158c78df 100644 --- a/x-pack/filebeat/docs/inputs/input-gcs.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-gcs.asciidoc @@ -253,6 +253,61 @@ highly nested json data. If this is set to `false` the *gcs.storage.object.json_ applicable for json objects and has no effect on other types of objects. This attribute can be specified both at the root level of the configuration as well at the bucket level. The bucket level values will always take priority and override the root level values if both are specified. +[id="input-{type}-encoding"] +[float] +==== `encoding` + +The file encoding to use for reading data that contains international +characters. This only applies to non-JSON logs. See <<_encoding_3>>. 
+ +[id="input-{type}-decoding"] +[float] +==== `decoding` + +The file decoding option is used to specify a codec that will be used to +decode the file contents. This can apply to any file stream data. +An example config is shown below: + +Currently supported codecs are given below:- + + 1. <>: This codec decodes RFC 4180 CSV data streams. + +[id="attrib-decoding-csv-gcs"] +[float] +==== `the CSV codec` +The `CSV` codec is used to decode RFC 4180 CSV data streams. +Enabling the codec without other options will use the default codec options. + +[source,yaml] +---- + decoding.codec.csv.enabled: true +---- + +The CSV codec supports five sub attributes to control aspects of CSV decoding. +The `comma` attribute specifies the field separator character used by the CSV +format. If it is not specified, the comma character '`,`' is used. The `comment` +attribute specifies the character that should be interpreted as a comment mark. +If it is specified, lines starting with the character will be ignored. Both +`comma` and `comment` must be single characters. The `lazy_quotes` attribute +controls how quoting in fields is handled. If `lazy_quotes` is true, a quote may +appear in an unquoted field and a non-doubled quote may appear in a quoted field. +The `trim_leading_space` attribute specifies that leading white space should be +ignored, even if the `comma` character is white space. For complete details +of the preceding configuration attribute behaviors, see the CSV decoder +https://pkg.go.dev/encoding/csv#Reader[documentation] The `fields_names` +attribute can be used to specify the column names for the data. If it is +absent, the field names are obtained from the first non-comment line of +data. The number of fields must match the number of field names. 
+ +An example config is shown below: + +[source,yaml] +---- + decoding.codec.csv.enabled: true + decoding.codec.csv.comma: "\t" + decoding.codec.csv.comment: "#" +---- + [id="attrib-file_selectors-gcs"] [float] ==== `file_selectors` @@ -408,4 +463,4 @@ In this configuration even though we have specified `max_workers = 10`, `poll = will override these values with their own respective values which are defined as part of their sub attibutes. -NOTE: Any feedback is welcome which will help us further optimize this input. Please feel free to open a github issue for any bugs or feature requests. \ No newline at end of file +NOTE: Any feedback is welcome which will help us further optimize this input. Please feel free to open a github issue for any bugs or feature requests. diff --git a/x-pack/filebeat/input/gcs/config.go b/x-pack/filebeat/input/gcs/config.go index ed589e43df1a..6a7b93d5e479 100644 --- a/x-pack/filebeat/input/gcs/config.go +++ b/x-pack/filebeat/input/gcs/config.go @@ -16,6 +16,7 @@ import ( "golang.org/x/oauth2/google" "github.com/elastic/beats/v7/libbeat/common/match" + "github.com/elastic/beats/v7/libbeat/reader/parser" ) // MaxWorkers, Poll, PollInterval, BucketTimeOut, ParseJSON, FileSelectors, TimeStampEpoch & ExpandEventListFromField @@ -41,6 +42,8 @@ type config struct { Buckets []bucket `config:"buckets" validate:"required"` // FileSelectors - Defines a list of regex patterns that can be used to filter out objects from the bucket. FileSelectors []fileSelectorConfig `config:"file_selectors"` + // ReaderConfig is the default parser and decoder configuration. + ReaderConfig readerConfig `config:",inline"` // TimeStampEpoch - Defines the epoch time in seconds, which is used to filter out objects that are older than the specified timestamp. TimeStampEpoch *int64 `config:"timestamp_epoch"` // ExpandEventListFromField - Defines the field name that will be used to expand the event into separate events. 
@@ -58,6 +61,7 @@ type bucket struct { PollInterval *time.Duration `config:"poll_interval,omitempty"` ParseJSON *bool `config:"parse_json,omitempty"` FileSelectors []fileSelectorConfig `config:"file_selectors"` + ReaderConfig readerConfig `config:",inline"` TimeStampEpoch *int64 `config:"timestamp_epoch"` ExpandEventListFromField string `config:"expand_event_list_from_field"` } @@ -68,6 +72,12 @@ type fileSelectorConfig struct { // TODO: Add support for reader config in future } +// readerConfig defines the options for reading the content of an GCS object. +type readerConfig struct { + Parsers parser.Config `config:",inline"` + Decoding decoderConfig `config:"decoding"` +} + type authConfig struct { CredentialsJSON *jsonCredentialsConfig `config:"credentials_json,omitempty"` CredentialsFile *fileCredentialsConfig `config:"credentials_file,omitempty"` diff --git a/x-pack/filebeat/input/gcs/decoding.go b/x-pack/filebeat/input/gcs/decoding.go new file mode 100644 index 000000000000..c6236147d4bf --- /dev/null +++ b/x-pack/filebeat/input/gcs/decoding.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package gcs + +import ( + "fmt" + "io" +) + +// decoder is an interface for decoding data from an io.Reader. +type decoder interface { + // decode reads and decodes data from an io reader based on the codec type. + // It returns the decoded data and an error if the data cannot be decoded. + decode() ([]byte, error) + // next advances the decoder to the next data item and returns true if there is more data to be decoded. + next() bool + // close closes the decoder and releases any resources associated with it. + // It returns an error if the decoder cannot be closed. + + // more returns whether there are more records to read. 
+ more() bool + + close() error +} + +// valueDecoder is a decoder that can decode directly to a JSON serialisable value. +type valueDecoder interface { + decoder + + decodeValue() ([]byte, map[string]any, error) +} + +// newDecoder creates a new decoder based on the codec type. +// It returns a decoder type and an error if the codec type is not supported. +// If the reader config codec option is not set, it returns a nil decoder and nil error. +func newDecoder(cfg decoderConfig, r io.Reader) (decoder, error) { + switch { + case cfg.Codec == nil: + return nil, nil + case cfg.Codec.CSV != nil: + return newCSVDecoder(cfg, r) + default: + return nil, fmt.Errorf("unsupported config value: %v", cfg) + } +} diff --git a/x-pack/filebeat/input/gcs/decoding_config.go b/x-pack/filebeat/input/gcs/decoding_config.go new file mode 100644 index 000000000000..625dbce473e9 --- /dev/null +++ b/x-pack/filebeat/input/gcs/decoding_config.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package gcs + +import ( + "fmt" + "unicode/utf8" +) + +// decoderConfig contains the configuration options for instantiating a decoder. +type decoderConfig struct { + Codec *codecConfig `config:"codec"` +} + +// codecConfig contains the configuration options for different codecs used by a decoder. +type codecConfig struct { + CSV *csvCodecConfig `config:"csv"` +} + +// csvCodecConfig contains the configuration options for the CSV codec. +type csvCodecConfig struct { + Enabled bool `config:"enabled"` + + // Fields is the set of field names. If it is present + // it is used to specify the object names of returned + // values and the FieldsPerRecord field in the csv.Reader. + // Otherwise, names are obtained from the first + // line of the CSV data. 
+ Fields []string `config:"fields_names"` + + // The fields below have the same meaning as the + // fields of the same name in csv.Reader. + Comma *configRune `config:"comma"` + Comment configRune `config:"comment"` + LazyQuotes bool `config:"lazy_quotes"` + TrimLeadingSpace bool `config:"trim_leading_space"` +} + +type configRune rune + +func (r *configRune) Unpack(s string) error { + if s == "" { + return nil + } + n := utf8.RuneCountInString(s) + if n != 1 { + return fmt.Errorf("single character option given more than one character: %q", s) + } + _r, _ := utf8.DecodeRuneInString(s) + *r = configRune(_r) + return nil +} diff --git a/x-pack/filebeat/input/gcs/decoding_csv.go b/x-pack/filebeat/input/gcs/decoding_csv.go new file mode 100644 index 000000000000..8abeea0ba54d --- /dev/null +++ b/x-pack/filebeat/input/gcs/decoding_csv.go @@ -0,0 +1,139 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package gcs + +import ( + "bytes" + "encoding/csv" + "fmt" + "io" + "slices" +) + +// csvDecoder is a decoder for CSV data. +type csvDecoder struct { + r *csv.Reader + + header []string + current []string + coming []string + + err error +} + +// newCSVDecoder creates a new CSV decoder. 
+func newCSVDecoder(config decoderConfig, r io.Reader) (decoder, error) { + d := csvDecoder{r: csv.NewReader(r)} + d.r.ReuseRecord = true + if config.Codec.CSV.Comma != nil { + d.r.Comma = rune(*config.Codec.CSV.Comma) + } + d.r.Comment = rune(config.Codec.CSV.Comment) + d.r.LazyQuotes = config.Codec.CSV.LazyQuotes + d.r.TrimLeadingSpace = config.Codec.CSV.TrimLeadingSpace + if len(config.Codec.CSV.Fields) != 0 { + d.r.FieldsPerRecord = len(config.Codec.CSV.Fields) + d.header = config.Codec.CSV.Fields + } else { + h, err := d.r.Read() + if err != nil { + return nil, err + } + d.header = slices.Clone(h) + } + var err error + d.coming, err = d.r.Read() + if err != nil { + return nil, err + } + d.current = make([]string, 0, len(d.header)) + return &d, nil +} + +func (d *csvDecoder) more() bool { return len(d.coming) == len(d.header) } + +// next advances the decoder to the next data item and returns true if +// there is more data to be decoded. +func (d *csvDecoder) next() bool { + if !d.more() && d.err != nil { + return false + } + d.current = d.current[:len(d.header)] + copy(d.current, d.coming) + d.coming, d.err = d.r.Read() + if d.err == io.EOF { + d.coming = nil + } + return true +} + +// decode returns the JSON encoded value of the current CSV line. next must +// have been called before any calls to decode. +func (d *csvDecoder) decode() ([]byte, error) { + err := d.check() + if err != nil { + return nil, err + } + var buf bytes.Buffer + buf.WriteByte('{') + for i, n := range d.header { + if i != 0 { + buf.WriteByte(',') + } + buf.WriteByte('"') + buf.WriteString(n) + buf.WriteString(`":"`) + buf.WriteString(d.current[i]) + buf.WriteByte('"') + } + buf.WriteByte('}') + d.current = d.current[:0] + return buf.Bytes(), nil +} + +// decodeValue returns the value of the current CSV line interpreted as +// an object with fields based on the header held by the receiver. next must +// have been called before any calls to decode. 
+func (d *csvDecoder) decodeValue() ([]byte, map[string]any, error) { + err := d.check() + if err != nil { + return nil, nil, err + } + m := make(map[string]any, len(d.header)) + for i, n := range d.header { + m[n] = d.current[i] + } + d.current = d.current[:0] + b, err := d.decode() + if err != nil { + return nil, nil, err + } + return b, m, nil +} + +func (d *csvDecoder) check() error { + if d.err != nil { + if d.err == io.EOF && d.coming == nil { + return nil + } + return d.err + } + if len(d.current) == 0 { + return fmt.Errorf("decode called before next") + } + // By the time we are here, current must be the same + // length as header; if it was not read, it would be + // zero, but if it was, it must match by the contract + // of the csv.Reader. + return nil +} + +// close closes the csv decoder and releases the resources. +func (d *csvDecoder) close() error { + if d.err == io.EOF { + return nil + } + return d.err +} diff --git a/x-pack/filebeat/input/gcs/decoding_test.go b/x-pack/filebeat/input/gcs/decoding_test.go new file mode 100644 index 000000000000..0a2ee5e3f0d7 --- /dev/null +++ b/x-pack/filebeat/input/gcs/decoding_test.go @@ -0,0 +1,225 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package gcs + +import ( + "context" + "encoding/json" + "errors" + "os" + "path/filepath" + "reflect" + "testing" + + "cloud.google.com/go/storage" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/beat" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" +) + +// all test files are read from the "testdata" directory +const testDataPath = "testdata" + +func TestDecoding(t *testing.T) { + logp.TestingSetup() + log := logp.L() + + testCases := []struct { + name string + file string + contentType string + numEvents int + assertAgainst string + config decoderConfig + }{ + { + name: "gzip_csv", + file: "txn.csv.gz", + numEvents: 4, + assertAgainst: "txn.json", + config: decoderConfig{ + Codec: &codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + Comma: ptr[configRune](' '), + }, + }, + }, + }, + { + name: "csv", + file: "txn.csv", + numEvents: 4, + assertAgainst: "txn.json", + config: decoderConfig{ + Codec: &codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + Comma: ptr[configRune](' '), + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + file := filepath.Join(testDataPath, tc.file) + if tc.contentType == "" { + tc.contentType = "application/octet-stream" + } + f, err := os.Open(file) + if err != nil { + t.Fatalf("failed to open test data: %v", err) + } + defer f.Close() + p := &pub{t: t} + j := newJob(&storage.BucketHandle{}, &storage.ObjectAttrs{Name: "test_object"}, "gs://test_uri", newState(), &Source{}, p, log, false) + j.src.ReaderConfig.Decoding = tc.config + err = j.decode(context.Background(), f, "test") + if err != nil { + t.Errorf("unexpected error calling decode: %v", err) + } + + events := p.events + if tc.assertAgainst != "" { + targetData := readJSONFromFile(t, filepath.Join(testDataPath, tc.assertAgainst)) + assert.Equal(t, len(targetData), len(events)) + + for i, event := range events 
{ + msg, err := event.Fields.GetValue("message") + assert.NoError(t, err) + assert.JSONEq(t, targetData[i], msg.(string)) + } + } + }) + } +} + +type pub struct { + t *testing.T + events []beat.Event +} + +func (p *pub) Publish(e beat.Event, _cursor interface{}) error { + p.t.Logf("%v\n", e.Fields) + p.events = append(p.events, e) + return nil +} + +// readJSONFromFile reads the json file and returns the data as a slice of strings +func readJSONFromFile(t *testing.T, filepath string) []string { + fileBytes, err := os.ReadFile(filepath) + assert.NoError(t, err) + var rawMessages []json.RawMessage + err = json.Unmarshal(fileBytes, &rawMessages) + assert.NoError(t, err) + var data []string + + for _, rawMsg := range rawMessages { + data = append(data, string(rawMsg)) + } + return data +} + +var codecConfigTests = []struct { + name string + yaml string + want decoderConfig + wantErr error +}{ + { + name: "handle_rune", + yaml: ` +codec: + csv: + enabled: true + comma: ' ' + comment: '#' +`, + want: decoderConfig{&codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + Comma: ptr[configRune](' '), + Comment: '#', + }, + }}, + }, + { + name: "no_comma", + yaml: ` +codec: + csv: + enabled: true +`, + want: decoderConfig{&codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + }, + }}, + }, + { + name: "null_comma", + yaml: ` +codec: + csv: + enabled: true + comma: "\u0000" +`, + want: decoderConfig{&codecConfig{ + CSV: &csvCodecConfig{ + Enabled: true, + Comma: ptr[configRune]('\x00'), + }, + }}, + }, + { + name: "bad_rune", + yaml: ` +codec: + csv: + enabled: true + comma: 'this is too long' +`, + wantErr: errors.New(`single character option given more than one character: "this is too long" accessing 'codec.csv.comma'`), + }, +} + +func TestCodecConfig(t *testing.T) { + for _, test := range codecConfigTests { + t.Run(test.name, func(t *testing.T) { + c, err := conf.NewConfigWithYAML([]byte(test.yaml), "") + if err != nil { + t.Fatalf("unexpected error unmarshaling 
config: %v", err) + } + + var got decoderConfig + err = c.Unpack(&got) + if !sameError(err, test.wantErr) { + t.Errorf("unexpected error unpacking config: got:%v want:%v", err, test.wantErr) + } + + if !reflect.DeepEqual(got, test.want) { + t.Errorf("unexpected result\n--- want\n+++ got\n%s", cmp.Diff(test.want, got)) + } + }) + } +} + +func sameError(a, b error) bool { + switch { + case a == nil && b == nil: + return true + case a == nil, b == nil: + return false + default: + return a.Error() == b.Error() + } +} + +func ptr[T any](v T) *T { return &v } diff --git a/x-pack/filebeat/input/gcs/input.go b/x-pack/filebeat/input/gcs/input.go index 97b14dc2b345..a2ecf2c28afc 100644 --- a/x-pack/filebeat/input/gcs/input.go +++ b/x-pack/filebeat/input/gcs/input.go @@ -71,6 +71,7 @@ func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { TimeStampEpoch: bucket.TimeStampEpoch, ExpandEventListFromField: bucket.ExpandEventListFromField, FileSelectors: bucket.FileSelectors, + ReaderConfig: bucket.ReaderConfig, }) } @@ -125,6 +126,7 @@ func tryOverrideOrDefault(cfg config, b bucket) bucket { if len(b.FileSelectors) == 0 && len(cfg.FileSelectors) != 0 { b.FileSelectors = cfg.FileSelectors } + b.ReaderConfig = cfg.ReaderConfig return b } diff --git a/x-pack/filebeat/input/gcs/input_stateless.go b/x-pack/filebeat/input/gcs/input_stateless.go index 04ec19de5ddf..3cdeb3794739 100644 --- a/x-pack/filebeat/input/gcs/input_stateless.go +++ b/x-pack/filebeat/input/gcs/input_stateless.go @@ -62,6 +62,7 @@ func (in *statelessInput) Run(inputCtx v2.Context, publisher stateless.Publisher TimeStampEpoch: bucket.TimeStampEpoch, ExpandEventListFromField: bucket.ExpandEventListFromField, FileSelectors: bucket.FileSelectors, + ReaderConfig: bucket.ReaderConfig, } st := newState() diff --git a/x-pack/filebeat/input/gcs/job.go b/x-pack/filebeat/input/gcs/job.go index 63e631e39be9..403555311e9d 100644 --- a/x-pack/filebeat/input/gcs/job.go +++ b/x-pack/filebeat/input/gcs/job.go @@ 
-137,20 +137,85 @@ func (j *job) processAndPublishData(ctx context.Context, id string) error { } }() - err = j.readJsonAndPublish(ctx, reader, id) - if err != nil { - return fmt.Errorf("failed to read data from object: %s, with error: %w", j.object.Name, err) - } - - return err + return j.decode(ctx, reader, id) } -func (j *job) readJsonAndPublish(ctx context.Context, r io.Reader, id string) error { +func (j *job) decode(ctx context.Context, r io.Reader, id string) error { r, err := j.addGzipDecoderIfNeeded(bufio.NewReader(r)) if err != nil { return fmt.Errorf("failed to add gzip decoder to object: %s, with error: %w", j.object.Name, err) } + dec, err := newDecoder(j.src.ReaderConfig.Decoding, r) + if err != nil { + return err + } + var evtOffset int64 + switch dec := dec.(type) { + case valueDecoder: + defer dec.close() + + for dec.next() { + var ( + msg []byte + val []mapstr.M + ) + if j.src.ParseJSON { + var v mapstr.M + msg, v, err = dec.decodeValue() + if err != nil { + if err == io.EOF { + return nil + } + break + } + val = []mapstr.M{v} + } else { + msg, err = dec.decode() + if err != nil { + if err == io.EOF { + return nil + } + break + } + } + evt := j.createEvent(msg, val, evtOffset) + j.publish(evt, !dec.more(), id) + } + + case decoder: + defer dec.close() + + for dec.next() { + msg, err := dec.decode() + if err != nil { + if err == io.EOF { + return nil + } + break + } + var val []mapstr.M + if j.src.ParseJSON { + val, err = decodeJSON(bytes.NewReader(msg)) + if err != nil { + j.log.Errorw("job encountered an error", "gcs.jobId", id, "error", err) + } + } + evt := j.createEvent(msg, val, evtOffset) + j.publish(evt, !dec.more(), id) + } + + default: + err = j.readJsonAndPublish(ctx, r, id) + if err != nil { + return fmt.Errorf("failed to read data from object: %s, with error: %w", j.object.Name, err) + } + } + + return err +} +func (j *job) readJsonAndPublish(ctx context.Context, r io.Reader, id string) error { + var err error r, j.isRootArray, err = 
evaluateJSON(bufio.NewReader(r)) if err != nil { return fmt.Errorf("failed to evaluate json for object: %s, with error: %w", j.object.Name, err) @@ -190,23 +255,27 @@ func (j *job) readJsonAndPublish(ctx context.Context, r io.Reader, id string) er } } evt := j.createEvent(item, parsedData, offset) - if !dec.More() { - // if this is the last object, then perform a complete state save - cp, done := j.state.saveForTx(j.object.Name, j.object.Updated) - if err := j.publisher.Publish(evt, cp); err != nil { - j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) - } - done() - } else { - // since we don't update the cursor checkpoint, lack of a lock here is not a problem - if err := j.publisher.Publish(evt, nil); err != nil { - j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) - } - } + j.publish(evt, !dec.More(), id) } return nil } +func (j *job) publish(evt beat.Event, last bool, id string) { + if last { + // if this is the last object, then perform a complete state save + cp, done := j.state.saveForTx(j.object.Name, j.object.Updated) + if err := j.publisher.Publish(evt, cp); err != nil { + j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) + } + done() + return + } + // since we don't update the cursor checkpoint, lack of a lock here is not a problem + if err := j.publisher.Publish(evt, nil); err != nil { + j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) + } +} + // splitEventList splits the event list into individual events and publishes them func (j *job) splitEventList(key string, raw json.RawMessage, offset int64, objHash string, id string) error { var jsonObject map[string]json.RawMessage diff --git a/x-pack/filebeat/input/gcs/testdata/txn.csv b/x-pack/filebeat/input/gcs/testdata/txn.csv new file mode 100644 index 000000000000..80ca65df21ef --- /dev/null +++ 
b/x-pack/filebeat/input/gcs/testdata/txn.csv @@ -0,0 +1,5 @@ +date time time-taken cs-bytes sc-bytes bytes c-ip s-ip cs-username cs-method cs-uri-scheme cs-uri-query cs-user-agent cs-content-type sc-status sc-content-type cs-dns cs-host cs-uri cs-uri-port cs-referer x-cs-session-id x-cs-access-method x-cs-app x-s-country x-s-latitude x-s-longitude x-s-location x-s-region x-s-zipcode x-c-country x-c-latitude x-c-longitude x-c-location x-c-region x-c-zipcode x-c-os x-c-browser x-c-browser-version x-c-device x-cs-site x-cs-timestamp x-cs-page-id x-cs-userip x-cs-traffic-type x-cs-tunnel-id x-category x-other-category x-type x-server-ssl-err x-client-ssl-err x-transaction-id x-request-id x-cs-sni x-cs-domain-fronted-sni x-category-id x-other-category-id x-sr-headers-name x-sr-headers-value x-cs-ssl-ja3 x-sr-ssl-ja3s x-ssl-bypass x-ssl-bypass-reason x-r-cert-subject-cn x-r-cert-issuer-cn x-r-cert-startdate x-r-cert-enddate x-r-cert-valid x-r-cert-expired x-r-cert-untrusted-root x-r-cert-incomplete-chain x-r-cert-self-signed x-r-cert-revoked x-r-cert-revocation-check x-r-cert-mismatch x-cs-ssl-fronting-error x-cs-ssl-handshake-error x-sr-ssl-handshake-error x-sr-ssl-client-certificate-error x-sr-ssl-malformed-ssl x-s-custom-signing-ca-error x-cs-ssl-engine-action x-cs-ssl-engine-action-reason x-sr-ssl-engine-action x-sr-ssl-engine-action-reason x-ssl-policy-src-ip x-ssl-policy-dst-ip x-ssl-policy-dst-host x-ssl-policy-dst-host-source x-ssl-policy-categories x-ssl-policy-action x-ssl-policy-name x-cs-ssl-version x-cs-ssl-cipher x-sr-ssl-version x-sr-ssl-cipher x-cs-src-ip-egress x-s-dp-name x-cs-src-ip x-cs-src-port x-cs-dst-ip x-cs-dst-port x-sr-src-ip x-sr-src-port x-sr-dst-ip x-sr-dst-port x-cs-ip-connect-xff x-cs-ip-xff x-cs-connect-host x-cs-connect-port x-cs-connect-user-agent x-cs-url x-cs-uri-path x-cs-http-version rs-status x-cs-app-category x-cs-app-cci x-cs-app-ccl x-cs-app-tags x-cs-app-suite x-cs-app-instance-id x-cs-app-instance-name x-cs-app-instance-tag 
x-cs-app-activity x-cs-app-from-user x-cs-app-to-user x-cs-app-object-type x-cs-app-object-name x-cs-app-object-id x-rs-file-type x-rs-file-category x-rs-file-language x-rs-file-size x-rs-file-md5 x-rs-file-sha256 x-error x-c-local-time x-policy-action x-policy-name x-policy-src-ip x-policy-dst-ip x-policy-dst-host x-policy-dst-host-source x-policy-justification-type x-policy-justification-reason x-sc-notification-name +2024-08-05 16:24:20 64 2971 2050 5021 10.5.78.159 204.79.197.237 "vikash.ranjan@riverbed.com" GET https cc=US&setlang=en-US "Mozilla/5.0 (Windows NT 10.0; Win64; x64; Cortana 1.14.7.19041; 10.0.0.0.19045.2006) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19045" - 200 "application/json; charset=utf-8" www.bing.com www.bing.com /client/config?cc=US&setlang=en-US 443 - 3683772769278232507 "Client" "Microsoft Bing" "US" 47.682899 -122.120903 "Redmond" "Washington" "N/A" "US" 29.775400 -95.598000 "Houston" "Texas" "77079" "Windows 10" "Edge" "18.19045" "Windows Device" "bing" 1722875060 5762388460300455936 10.5.78.159 CloudApp - "Search Engines" - http_transaction - - 2696581500064586450 2901306739654139904 www.bing.com - 551 - - - 28a2c9bd18a11de089ef85a160da29e4 NotAvailable No - "NotChecked" "NotChecked" "NotChecked" "NotChecked" NotChecked NotChecked NotChecked NotChecked NotChecked NotChecked "NotChecked" NotChecked No No NotChecked NotChecked NotChecked No Allow "Established" None "NotEstablished" 10.5.78.159 69.192.139.97 www.bing.com Sni "Search Engines" Decrypt - TLSv1.2 ECDHE-RSA-AES256-GCM-SHA384 NotChecked NotChecked 208.185.23.18 "US-ATL2" 10.5.78.159 25941 69.192.139.97 443 - - 10.144.54.201 842 - - - - - https://www.bing.com/client/config?cc=US&setlang=en-US /client/config HTTP1.1 200 "Search Engines" 58 low "Consumer,Unsanctioned" - - - - "Browse" - - - - - - - - - - - - "2024-08-05 11:24:00" "allow" "NetskopeAllow" 10.5.78.159 204.79.197.237 www.bing.com HttpHostHeader - - - +2024-08-05 16:24:19 - 18 
0 18 10.70.0.19 - "nadav@skyformation.onmicrosoft.com" PRI - - - - - - - us-west1-b-osconfig.googleapis.com * 443 - 0 "Client" - - - - - - - "US" 45.605600 -121.180700 "The Dalles" "Oregon" "97058" - - - - - 1722875059 0 10.70.0.19 - - "Technology" "Cloud Storage" http_transaction - - 2035489204758272484 0 us-west1-b-osconfig.googleapis.com - 564 "7" - - 7a15285d4efc355608b304698cd7f9ab NotAvailable No - "NotChecked" "NotChecked" "NotChecked" "NotChecked" NotChecked NotChecked NotChecked NotChecked NotChecked NotChecked "NotChecked" NotChecked No No NotChecked NotChecked NotChecked No Allow "Established" None "NotEstablished" 10.70.0.19 142.250.99.95 us-west1-b-osconfig.googleapis.com Sni "Technology, Cloud Storage" Decrypt - TLSv1.3 TLS_AES_256_GCM_SHA384 NotChecked NotChecked 34.82.190.203 "US-SEA2" 10.70.0.19 32951 142.250.99.95 443 - - - - - - - - - - - HTTP1.1 - - - - - - - - - - - - - - - - - - - - - http-malformed "NotChecked" NotChecked - - - - - - - - +2024-08-05 16:24:20 - 0 0 0 10.0.20.111 - "levente.fangli@cososys.com" - - - - - - - - achecker-alliances.eu.goskope.com - 443 - 0 "Client" - - - - - - - "RO" 46.765700 23.594300 "Cluj-Napoca" "Cluj County" "400027" - - - - - 1722875060 0 10.0.20.111 - - - - http_transaction - "HsFailure (error:14094418:SSL routines:ssl3_read_bytes:tlsv1 alert unknown ca)" 1350739992944030464 0 achecker-alliances.eu.goskope.com - - - - - bc29aa426fc99c0be1b9be941869f88a NotAvailable No - "NotChecked" "NotChecked" "NotChecked" "NotChecked" NotChecked NotChecked NotChecked NotChecked NotChecked NotChecked "NotChecked" NotChecked No Yes NotChecked NotChecked NotChecked No Block "SSL Error - SSL Handshake Error" None "NotEstablished" - - - Unknown - Decrypt - - - NotChecked NotChecked 81.196.156.53 "AT-VIE1" 10.0.20.111 57897 31.186.239.94 443 - - - - - - - - - - - UNKNOWN - - - - - - - - - - - - - - - - - - - - - client-ssl "NotChecked" NotChecked - - - - - - - - +2024-08-05 16:24:23 - 0 0 0 10.0.20.111 - 
"levente.fangli@cososys.com" - - - - - - - - achecker-alliances.eu.goskope.com - 443 - 0 "Client" - - - - - - - "RO" 46.765700 23.594300 "Cluj-Napoca" "Cluj County" "400027" - - - - - 1722875063 0 10.0.20.111 - - - - http_transaction - "HsFailure (error:14094418:SSL routines:ssl3_read_bytes:tlsv1 alert unknown ca)" 1615432978285898071 0 achecker-alliances.eu.goskope.com - - - - - bc29aa426fc99c0be1b9be941869f88a NotAvailable No - "NotChecked" "NotChecked" "NotChecked" "NotChecked" NotChecked NotChecked NotChecked NotChecked NotChecked NotChecked "NotChecked" NotChecked No Yes NotChecked NotChecked NotChecked No Block "SSL Error - SSL Handshake Error" None "NotEstablished" - - - Unknown - Decrypt - - - NotChecked NotChecked 81.196.156.53 "AT-VIE1" 10.0.20.111 57897 31.186.239.94 443 - - - - - - - - - - - UNKNOWN - - - - - - - - - - - - - - - - - - - - - client-ssl "NotChecked" NotChecked - - - - - - - - diff --git a/x-pack/filebeat/input/gcs/testdata/txn.csv.gz b/x-pack/filebeat/input/gcs/testdata/txn.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..52e8fb20539a40e3127997a8877e3346ea1c6dfc GIT binary patch literal 2527 zcmV<52_W_#iwFP!00000|8!Mbn(H0bbhgbUzHnwq|6>2XLC4)%VlQk~5hX z8-672pb3p{=C<+y!!HguQuufp>rJT=J&Y^wJt84{jZSZ)s>1!Ywi1GuU1 zNW^-&SfG8sJs|mT1e0f`J({y);=;tRj2xl47&0TgH1dzzDdsaYsG?C7T~yJiin=Pg zsEV3@9}@r55>%t%&Z6zg#CJzrV|G@lR7Ex1_QLMC@R^BJ09OSjIM0qsk_&Q#z`h}5Vg2ZTpxj=E-JsSh9=tisTXHd6SuP)h~ZfLajROK?bu%t3Z>G+Ca(iOk>6x4M7D5x!w?6}#b;=P{O(9cn} zRCM)aCJ(i^<%p6Ggs*OJSX^Kzjr9ZEOIYo^UU~-)m(;@^y`v|na2amuach}27@z-vD9l_V}`#yRRBdnDHW2YPaiJ6tYDfd9TlNM^3p&sZOZ`R-AEh#wip6xKRu^l@ui93>YPn zC{36&xGRVhPGr)pQQ)aoh}0*B5uyi@}YZ z^{Wf#)4K|ioA7trbvBcP!pr;CH(@ux?L3B~=YSi9IEPR1H3|J_y#+=XM#tb(u=Bm% z{)w_AoYLYIl$%ZG?%m>TJ7zLhBro9Q+u8i)`ZaXzgM%wqKZI8>S@+@5Wh$EIGRN8| zhsrK(-+nfmHY=Ageej+@iltLXEc$P2JGQ}RABW)2psJwi zQz5In*7-53gI~6*KmH37A#x0q7dlsptHN-ldCnz8peK9d1Yv9IJ`77E9YaWAdQ0X37LiI6t6mGNf*^V2cloVK5Y9x_H(^%vzJPilU 
zx%*@XT$Q5AL^sbgF^WP&2$wGy{d;`rwrw5H7W)*L;#@093v4J=@LX$A(43-;G>G(Z ze@(jZ)F6onLsB{0*K3&iRqNdlDTv+lVZ>$|Y#k}UE###zh15)9>V=RR{}k(@ET@Ia z@kp@TV1B1$q#z~Ro1+75d2JRA)0VNuQEl8(OUnw{SZpjXcKwa^vt2Hm*F# z|8rBHur@;gW?>j-&Ni+;g!=ure*h9uW=ReP001A02mk;800003ol{MZ+aM6VSL%Nl zajXgg3^wekRyOKZ>c^^f&la)qht@HXVYAA=FHR24M%gy1#P7RhgVRI`r8yOuR6OHCi3odxR17i|fU*;{%rME7Y=mv=L`H3DlTeXr zRX16y%?7@^knhbfn$2kAV&NJ~tKQa6wn4Wp{^b9gz72PJ{)q(nIJy@=YagPP!S^Si z$sVC!RnLaQ1SeclqBYU-mg_C1;e47O;bNHgww@2W5bv@4TI?A~mT#4>i;By%`n>v9 z6%Tn5hk0E`B8cLV7$wmFiz{)l&iO0K3D=Tc5I?#5T0E5HW3=)cyVqi|Bi@>E`3L_p zn2Wae?|uVN)}`m^0{{RYiwFP!000001MO2yZxb;Py(94-ET7=8mcQ0(=R^rXMW9ro z6vQd=I!?2!v7@y&DF43RO%SwAqzDd3sG7^nc>LbXXdcCs7g!d!1Vl_IpHK`6fJY-3 zVMH+^gybA_c58jIXr=wFYhIZUL)=AV!grXT@0&JjYg*eC+POx1V?Ifhti}7z4^ssW`)9egCbP>rF5k?jtY_y-&Lwaf~soe#ase zrLroqcs$bH)+ucksnb4clbYZM?#uXAH?Hq2cwxIPbQ8j{;+)9I_4NnPg+4Xj#z~Bh zEjp{0i+Y!GoLt-zpq=d!=zZ(M4-brf2?1dxu9#AaW)#dY7d&@!&X#^!ZK%?kQ?W8i z8C+XZtJciWLUy;gJ1rix~Jj5R~yfP0#Er7$S zXT$dMEQ5ck{W7j~Yxmg!-P@6G7E>O(iR*!LzptCa`U~*Jnr^pAhx~C;Nf1#2ky4 Date: Wed, 9 Oct 2024 12:12:32 +0300 Subject: [PATCH 17/90] Add test collector to Beats projects (#41172) * Add test collector for the Beats projects Signed-off-by: Alexandros Sapranidis --- .buildkite/auditbeat/auditbeat-pipeline.yml | 72 +++++++++++++++++ .buildkite/filebeat/filebeat-pipeline.yml | 66 ++++++++++++++++ .buildkite/heartbeat/heartbeat-pipeline.yml | 72 +++++++++++++++++ .buildkite/hooks/pre-command | 22 ++++++ .buildkite/libbeat/pipeline.libbeat.yml | 24 ++++++ .buildkite/metricbeat/pipeline.yml | 60 ++++++++++++++ .buildkite/packetbeat/pipeline.packetbeat.yml | 60 ++++++++++++++ .buildkite/winlogbeat/pipeline.winlogbeat.yml | 36 +++++++++ .../x-pack/pipeline.xpack.agentbeat.yml | 6 ++ .../x-pack/pipeline.xpack.auditbeat.yml | 60 ++++++++++++++ .../x-pack/pipeline.xpack.dockerlogbeat.yml | 12 +++ .buildkite/x-pack/pipeline.xpack.filebeat.yml | 72 +++++++++++++++++ .../x-pack/pipeline.xpack.heartbeat.yml | 54 +++++++++++++ 
.buildkite/x-pack/pipeline.xpack.libbeat.yml | 54 +++++++++++++ .../x-pack/pipeline.xpack.metricbeat.yml | 66 ++++++++++++++++ .../x-pack/pipeline.xpack.osquerybeat.yml | 54 +++++++++++++ .../x-pack/pipeline.xpack.packetbeat.yml | 78 +++++++++++++++++++ .../x-pack/pipeline.xpack.winlogbeat.yml | 36 +++++++++ 18 files changed, 904 insertions(+) diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index d11f067c4815..e083df17749b 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -86,6 +86,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu x86_64 Unit Tests" @@ -105,6 +111,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: RHEL9 Unit Tests" @@ -125,6 +137,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 2016 Unit Tests" @@ -145,6 +163,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 2022 Unit Tests" @@ -185,6 +209,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + 
debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu x86_64 Integration Tests" @@ -206,6 +236,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu arm64 Integration Tests" @@ -227,6 +263,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu arm64 Unit Tests" @@ -247,6 +289,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: macOS x86_64 Unit Tests" @@ -267,6 +315,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: macOS arm64 Unit Tests" @@ -293,6 +347,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 2019 Unit Tests" @@ -314,6 +374,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 10 Unit Tests" @@ -335,6 +401,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - 
"auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 11 Unit Tests" diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index f92730b158cb..d882cf1c9340 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -87,6 +87,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Ubuntu x86_64 Unit Tests" @@ -105,6 +111,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Go Integration Tests" @@ -123,6 +135,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Python Integration Tests" @@ -144,6 +162,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 2016 Unit Tests" @@ -165,6 +189,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 2022 Unit Tests" @@ -191,6 +221,12 @@ steps: artifact_paths: - 
"filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: macOS x86_64 Unit Tests" @@ -212,6 +248,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: macOS arm64 Unit Tests" @@ -232,6 +274,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Ubuntu arm64 Unit Tests" @@ -258,6 +306,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 2019 Unit Tests" @@ -279,6 +333,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 11 Unit Tests" @@ -300,6 +360,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 10 Unit Tests" diff --git a/.buildkite/heartbeat/heartbeat-pipeline.yml b/.buildkite/heartbeat/heartbeat-pipeline.yml index abdc8f73e330..27d4850f4de9 100644 --- a/.buildkite/heartbeat/heartbeat-pipeline.yml +++ b/.buildkite/heartbeat/heartbeat-pipeline.yml @@ -86,6 +86,12 @@ steps: 
artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Ubuntu x86_64 Unit Tests" @@ -104,6 +110,12 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: RHEL9 Unit Tests" @@ -124,6 +136,12 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Win 2016 Unit Tests" @@ -144,6 +162,12 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Win 2022 Unit Tests" @@ -163,6 +187,12 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Go Integration Tests" @@ -182,6 +212,12 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Python Integration Tests" @@ -205,6 +241,12 @@ steps: imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" artifact_paths: "heartbeat/build/*.xml" + plugins: + - test-collector#v1.10.2: + files: 
"heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Ubuntu arm64 Unit Tests" @@ -226,6 +268,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: macOS x86_64 Unit Tests" @@ -247,6 +295,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: macOS arm64 Unit Tests" @@ -272,6 +326,12 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Win 2019 Unit Tests" @@ -292,6 +352,12 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Win 11 Unit Tests" @@ -312,6 +378,12 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "heartbeat: Win 10 Unit Tests" diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index b905f053121b..621e867314d0 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -18,6 +18,28 @@ if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-packetbeat" && "$BUILDKITE_STEP export PRIVATE_CI_GCS_CREDENTIALS_SECRET fi +if [[ 
"$BUILDKITE_PIPELINE_SLUG" == "filebeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "auditbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "heartbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-filebeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-dockerlogbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-metricbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-osquerybeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-winlogbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-libbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-auditbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-heartbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-packetbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-winlogbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-agentbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then + echo "--- Prepare BK test analytics token :vault:" + BUILDKITE_ANALYTICS_TOKEN=$(vault kv get -field token kv/ci-shared/platform-ingest/buildkite_beats_analytics_token) + export BUILDKITE_ANALYTICS_TOKEN +fi + CPU_ARCH=$(uname -m) PLATFORM_TYPE=$(uname) diff --git a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml index b19cd0ae7517..67f7628861e5 100644 --- a/.buildkite/libbeat/pipeline.libbeat.yml +++ b/.buildkite/libbeat/pipeline.libbeat.yml @@ -77,6 +77,12 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "libbeat/build/TEST-*-unit.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "libbeat: Ubuntu x86_64 Unit Tests" @@ -97,6 +103,12 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - 
github_commit_status: context: "libbeat: Go Integration Tests" @@ -117,6 +129,12 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "libbeat: Python Integration Tests" @@ -180,6 +198,12 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "libbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 3ec9af58bf64..a23fc121d38e 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -91,6 +91,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Ubuntu x86_64 Unit Tests" @@ -113,6 +119,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Go Integration Tests (Module)" @@ -135,6 +147,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Python Integration Tests" @@ -173,6 +191,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: 
"main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 2016 Unit Tests" @@ -194,6 +218,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 2022 Unit Tests" @@ -220,6 +250,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 10 Unit Tests" @@ -241,6 +277,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 11 Unit Tests" @@ -262,6 +304,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 2019 Unit Tests" @@ -287,6 +335,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: macOS x86_64 Unit Tests" @@ -309,6 +363,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: macOS arm64 Unit Tests" diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml 
b/.buildkite/packetbeat/pipeline.packetbeat.yml index 3237644dfabb..753dd182548e 100644 --- a/.buildkite/packetbeat/pipeline.packetbeat.yml +++ b/.buildkite/packetbeat/pipeline.packetbeat.yml @@ -85,6 +85,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Ubuntu x86_64 Unit Tests" @@ -103,6 +109,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: RHEL9 Unit Tests" @@ -123,6 +135,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 2016 Unit Tests" @@ -143,6 +161,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 2022 Unit Tests" @@ -168,6 +192,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 10 Unit Tests" @@ -189,6 +219,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 11 Unit 
Tests" @@ -210,6 +246,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 2019 Unit Tests" @@ -235,6 +277,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: macOS x86_64 Unit Tests" @@ -256,6 +304,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: macOS arm64 Unit Tests" @@ -276,6 +330,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/winlogbeat/pipeline.winlogbeat.yml b/.buildkite/winlogbeat/pipeline.winlogbeat.yml index d8986a72a54e..9ccbcea39fe2 100644 --- a/.buildkite/winlogbeat/pipeline.winlogbeat.yml +++ b/.buildkite/winlogbeat/pipeline.winlogbeat.yml @@ -80,6 +80,12 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "winlogbeat: Crosscompile" @@ -101,6 +107,12 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: 
true notify: - github_commit_status: context: "winlogbeat: Win 2016 Unit Tests" @@ -122,6 +134,12 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "winlogbeat: Win 2019 Unit Tests" @@ -143,6 +161,12 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "winlogbeat: Win 2022 Unit Tests" @@ -169,6 +193,12 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "winlogbeat: Win 10 Unit Tests" @@ -190,6 +220,12 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "winlogbeat: Win 11 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml index 70aa4362b865..ef7cb1598aa4 100644 --- a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml @@ -80,6 +80,12 @@ steps: - x-pack/agentbeat/build/distributions/**/* - "x-pack/agentbeat/build/*.xml" - "x-pack/agentbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/agentbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true retry: automatic: - limit: 1 diff --git a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml index 7cebeab4787d..88dfb94bfb3d 100644 --- 
a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml @@ -89,6 +89,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Build Tests (Module)" @@ -108,6 +114,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: RHEL9 Unit Tests" @@ -129,6 +141,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2022 Unit Tests" @@ -150,6 +168,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2016 Unit Tests" @@ -176,6 +200,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2019 Unit Tests" @@ -197,6 +227,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - 
github_commit_status: context: "x-pack/auditbeat: Win 10 Unit Tests" @@ -218,6 +254,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 11 Unit Tests" @@ -243,6 +285,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: macOS x86_64 Unit Tests" @@ -263,6 +311,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: macOS arm64 Unit Tests" @@ -282,6 +336,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml index 54b3451b23e0..12ce197fbb69 100644 --- a/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml @@ -76,6 +76,12 @@ steps: artifact_paths: - "x-pack/dockerlogbeat/build/*.xml" - "x-pack/dockerlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/dockerlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/dockerlogbeat: Ubuntu x86_64 Unit 
Tests" @@ -96,6 +102,12 @@ steps: artifact_paths: - "x-pack/dockerlogbeat/build/*.xml" - "x-pack/dockerlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/dockerlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/dockerlogbeat: Go Integration Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.filebeat.yml b/.buildkite/x-pack/pipeline.xpack.filebeat.yml index 57b15927d617..91425933abee 100644 --- a/.buildkite/x-pack/pipeline.xpack.filebeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.filebeat.yml @@ -86,6 +86,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Ubuntu x86_64 Unit Tests" @@ -105,6 +111,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Go Integration Tests" @@ -124,6 +136,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Python Integration Tests" @@ -145,6 +163,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2022 Unit Tests" @@ -166,6 +190,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - 
"x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2016 Unit Tests" @@ -192,6 +222,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2019 Unit Tests" @@ -213,6 +249,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 10 Unit Tests" @@ -234,6 +276,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 11 Unit Tests" @@ -258,6 +306,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Ubuntu arm64 Unit Tests" @@ -278,6 +332,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: macOS x86_64 Unit Tests" @@ -298,6 +358,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - 
test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: macOS arm64 Unit Tests" @@ -326,6 +392,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: AWS Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml index 414eeb06e752..30d98bec3509 100644 --- a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml @@ -94,6 +94,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Ubuntu x86_64 Unit Tests" @@ -117,6 +123,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Go Integration Tests" @@ -138,6 +150,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2016 Unit Tests" @@ -159,6 +177,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: 
true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2022 Unit Tests" @@ -185,6 +209,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 10 Unit Tests" @@ -206,6 +236,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 11 Unit Tests" @@ -227,6 +263,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2019 Unit Tests" @@ -253,6 +295,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: macOS x86_64 Extended Tests" @@ -274,6 +322,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: macOS arm64 Extended Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.libbeat.yml b/.buildkite/x-pack/pipeline.xpack.libbeat.yml index 431ae1aed819..0a6993c733e9 100644 --- a/.buildkite/x-pack/pipeline.xpack.libbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.libbeat.yml @@ -82,6 +82,12 @@ 
steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Ubuntu x86_64 Unit Tests" @@ -101,6 +107,12 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Go Integration Tests" @@ -120,6 +132,12 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Python Integration Tests" @@ -141,6 +159,12 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 2016 Unit Tests" @@ -162,6 +186,12 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 2022 Unit Tests" @@ -188,6 +218,12 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 10 Unit Tests" @@ -209,6 +245,12 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - 
"x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 11 Unit Tests" @@ -230,6 +272,12 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 2019 Unit Tests" @@ -254,6 +302,12 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/libbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml index e616dd053897..abf627504510 100644 --- a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml @@ -87,6 +87,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Ubuntu x86_64 Unit Tests" @@ -108,6 +114,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Go Integration Tests (Module)" @@ -129,6 +141,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: 
"x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Python Integration Tests (Module)" @@ -150,6 +168,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2016 Unit Tests" @@ -171,6 +195,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2022 Unit Tests" @@ -197,6 +227,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 10 Unit Tests" @@ -218,6 +254,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 11 Unit Tests" @@ -239,6 +281,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2019 Unit Tests" @@ -263,6 +311,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: 
"x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: macOS x86_64 Unit Tests" @@ -284,6 +338,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: macOS arm64 Unit Tests" @@ -310,6 +370,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: AWS Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml index 9c397f95d79a..6f58f8b20681 100644 --- a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml @@ -82,6 +82,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Ubuntu x86_64 Unit Tests" @@ -101,6 +107,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Go Integration Tests" @@ -122,6 +134,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + branches: 
"main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win 2016 Unit Tests" @@ -143,6 +161,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win-2022 Unit Tests" @@ -169,6 +193,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win 10 Unit Tests" @@ -190,6 +220,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win 11 Unit Tests" @@ -211,6 +247,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win 2019 Unit Tests" @@ -234,6 +276,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: macOS x86_64 Unit Tests" @@ -252,6 +300,12 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/osquerybeat/build/TEST-*.xml" + format: "junit" + 
branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: macOS arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml index abf9950b9272..09279478de7e 100644 --- a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml @@ -86,6 +86,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Ubuntu x86_64 Unit Tests" @@ -105,6 +111,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Ubuntu x86_64 System Tests" @@ -124,6 +136,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: RHEL9 Unit Tests" @@ -145,6 +163,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2016 Unit Tests" @@ -166,6 +190,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: 
"x-pack/packetbeat: Win 2022 Unit Tests" @@ -188,6 +218,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2022 System Tests" @@ -214,6 +250,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 10 Unit Tests" @@ -235,6 +277,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 11 Unit Tests" @@ -256,6 +304,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2019 Unit Tests" @@ -278,6 +332,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 10 System Tests" @@ -303,6 +363,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: 
"x-pack/packetbeat: Ubuntu arm64 Unit Tests" @@ -324,6 +390,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: macOS x86_64 Unit Tests" @@ -345,6 +417,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: macOS arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml index f0a1b30c2536..398bf10dec50 100644 --- a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml @@ -79,6 +79,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat Win 2019 Unit Tests" @@ -100,6 +106,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2016 Unit Tests" @@ -121,6 +133,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2022 Unit Tests" @@ -147,6 +165,12 @@ steps: 
artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 10 Unit Tests" @@ -168,6 +192,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 11 Unit Tests" @@ -189,6 +219,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2019 Unit Tests" From 77aa604d364b7f6545d306e21737304f239df7bf Mon Sep 17 00:00:00 2001 From: Alexandros Sapranidis Date: Wed, 9 Oct 2024 12:36:27 +0300 Subject: [PATCH 18/90] Revert "Add test collector to Beats projects (#41172)" (#41182) This reverts commit e86bea4156a3410d89bbdc0b3b63c6749b500661. 
--- .buildkite/auditbeat/auditbeat-pipeline.yml | 72 ----------------- .buildkite/filebeat/filebeat-pipeline.yml | 66 ---------------- .buildkite/heartbeat/heartbeat-pipeline.yml | 72 ----------------- .buildkite/hooks/pre-command | 22 ------ .buildkite/libbeat/pipeline.libbeat.yml | 24 ------ .buildkite/metricbeat/pipeline.yml | 60 -------------- .buildkite/packetbeat/pipeline.packetbeat.yml | 60 -------------- .buildkite/winlogbeat/pipeline.winlogbeat.yml | 36 --------- .../x-pack/pipeline.xpack.agentbeat.yml | 6 -- .../x-pack/pipeline.xpack.auditbeat.yml | 60 -------------- .../x-pack/pipeline.xpack.dockerlogbeat.yml | 12 --- .buildkite/x-pack/pipeline.xpack.filebeat.yml | 72 ----------------- .../x-pack/pipeline.xpack.heartbeat.yml | 54 ------------- .buildkite/x-pack/pipeline.xpack.libbeat.yml | 54 ------------- .../x-pack/pipeline.xpack.metricbeat.yml | 66 ---------------- .../x-pack/pipeline.xpack.osquerybeat.yml | 54 ------------- .../x-pack/pipeline.xpack.packetbeat.yml | 78 ------------------- .../x-pack/pipeline.xpack.winlogbeat.yml | 36 --------- 18 files changed, 904 deletions(-) diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index e083df17749b..d11f067c4815 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -86,12 +86,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu x86_64 Unit Tests" @@ -111,12 +105,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: RHEL9 Unit Tests" @@ -137,12 +125,6 @@ steps: artifact_paths: - 
"auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Win 2016 Unit Tests" @@ -163,12 +145,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Win 2022 Unit Tests" @@ -209,12 +185,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu x86_64 Integration Tests" @@ -236,12 +206,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu arm64 Integration Tests" @@ -263,12 +227,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu arm64 Unit Tests" @@ -289,12 +247,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: macOS x86_64 Unit Tests" @@ -315,12 +267,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: 
true notify: - github_commit_status: context: "auditbeat: macOS arm64 Unit Tests" @@ -347,12 +293,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Win 2019 Unit Tests" @@ -374,12 +314,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Win 10 Unit Tests" @@ -401,12 +335,6 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "auditbeat: Win 11 Unit Tests" diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index d882cf1c9340..f92730b158cb 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -87,12 +87,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Ubuntu x86_64 Unit Tests" @@ -111,12 +105,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Go Integration Tests" @@ -135,12 +123,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - 
branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Python Integration Tests" @@ -162,12 +144,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Win 2016 Unit Tests" @@ -189,12 +165,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Win 2022 Unit Tests" @@ -221,12 +191,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: macOS x86_64 Unit Tests" @@ -248,12 +212,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: macOS arm64 Unit Tests" @@ -274,12 +232,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Ubuntu arm64 Unit Tests" @@ -306,12 +258,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Win 2019 Unit Tests" @@ -333,12 +279,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - 
test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Win 11 Unit Tests" @@ -360,12 +300,6 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "filebeat: Win 10 Unit Tests" diff --git a/.buildkite/heartbeat/heartbeat-pipeline.yml b/.buildkite/heartbeat/heartbeat-pipeline.yml index 27d4850f4de9..abdc8f73e330 100644 --- a/.buildkite/heartbeat/heartbeat-pipeline.yml +++ b/.buildkite/heartbeat/heartbeat-pipeline.yml @@ -86,12 +86,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Ubuntu x86_64 Unit Tests" @@ -110,12 +104,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: RHEL9 Unit Tests" @@ -136,12 +124,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Win 2016 Unit Tests" @@ -162,12 +144,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Win 2022 Unit Tests" @@ -187,12 +163,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - 
"heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Go Integration Tests" @@ -212,12 +182,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Python Integration Tests" @@ -241,12 +205,6 @@ steps: imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" artifact_paths: "heartbeat/build/*.xml" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Ubuntu arm64 Unit Tests" @@ -268,12 +226,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: macOS x86_64 Unit Tests" @@ -295,12 +247,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: macOS arm64 Unit Tests" @@ -326,12 +272,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Win 2019 Unit Tests" @@ -352,12 +292,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - 
branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Win 11 Unit Tests" @@ -378,12 +312,6 @@ steps: artifact_paths: - "heartbeat/build/*.xml" - "heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "heartbeat: Win 10 Unit Tests" diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 621e867314d0..b905f053121b 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -18,28 +18,6 @@ if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-packetbeat" && "$BUILDKITE_STEP export PRIVATE_CI_GCS_CREDENTIALS_SECRET fi -if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "auditbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "heartbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-filebeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-dockerlogbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-metricbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-osquerybeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-winlogbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-libbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-auditbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-heartbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-packetbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-winlogbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-agentbeat" || \ - "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then - echo "--- Prepare BK test analytics token :vault:" - BUILDKITE_ANALYTICS_TOKEN=$(vault kv get -field token kv/ci-shared/platform-ingest/buildkite_beats_analytics_token) - export BUILDKITE_ANALYTICS_TOKEN -fi - CPU_ARCH=$(uname -m) PLATFORM_TYPE=$(uname) diff --git 
a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml index 67f7628861e5..b19cd0ae7517 100644 --- a/.buildkite/libbeat/pipeline.libbeat.yml +++ b/.buildkite/libbeat/pipeline.libbeat.yml @@ -77,12 +77,6 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "libbeat/build/TEST-*-unit.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "libbeat: Ubuntu x86_64 Unit Tests" @@ -103,12 +97,6 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "libbeat: Go Integration Tests" @@ -129,12 +117,6 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "libbeat: Python Integration Tests" @@ -198,12 +180,6 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "libbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index a23fc121d38e..3ec9af58bf64 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -91,12 +91,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: Ubuntu x86_64 Unit Tests" @@ -119,12 +113,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - 
"metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: Go Integration Tests (Module)" @@ -147,12 +135,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: Python Integration Tests" @@ -191,12 +173,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: Win 2016 Unit Tests" @@ -218,12 +194,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: Win 2022 Unit Tests" @@ -250,12 +220,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: Win 10 Unit Tests" @@ -277,12 +241,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: Win 11 Unit Tests" @@ -304,12 +262,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - 
github_commit_status: context: "metricbeat: Win 2019 Unit Tests" @@ -335,12 +287,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: macOS x86_64 Unit Tests" @@ -363,12 +309,6 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "metricbeat: macOS arm64 Unit Tests" diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml b/.buildkite/packetbeat/pipeline.packetbeat.yml index 753dd182548e..3237644dfabb 100644 --- a/.buildkite/packetbeat/pipeline.packetbeat.yml +++ b/.buildkite/packetbeat/pipeline.packetbeat.yml @@ -85,12 +85,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: Ubuntu x86_64 Unit Tests" @@ -109,12 +103,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: RHEL9 Unit Tests" @@ -135,12 +123,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: Win 2016 Unit Tests" @@ -161,12 +143,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: 
"packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: Win 2022 Unit Tests" @@ -192,12 +168,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: Win 10 Unit Tests" @@ -219,12 +189,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: Win 11 Unit Tests" @@ -246,12 +210,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: Win 2019 Unit Tests" @@ -277,12 +235,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: macOS x86_64 Unit Tests" @@ -304,12 +256,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: macOS arm64 Unit Tests" @@ -330,12 +276,6 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "packetbeat: Ubuntu arm64 Unit Tests" diff --git 
a/.buildkite/winlogbeat/pipeline.winlogbeat.yml b/.buildkite/winlogbeat/pipeline.winlogbeat.yml index 9ccbcea39fe2..d8986a72a54e 100644 --- a/.buildkite/winlogbeat/pipeline.winlogbeat.yml +++ b/.buildkite/winlogbeat/pipeline.winlogbeat.yml @@ -80,12 +80,6 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "winlogbeat: Crosscompile" @@ -107,12 +101,6 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "winlogbeat: Win 2016 Unit Tests" @@ -134,12 +122,6 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "winlogbeat: Win 2019 Unit Tests" @@ -161,12 +143,6 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "winlogbeat: Win 2022 Unit Tests" @@ -193,12 +169,6 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "winlogbeat: Win 10 Unit Tests" @@ -220,12 +190,6 @@ steps: artifact_paths: - "winlogbeat/build/*.xml" - "winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - 
github_commit_status: context: "winlogbeat: Win 11 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml index ef7cb1598aa4..70aa4362b865 100644 --- a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml @@ -80,12 +80,6 @@ steps: - x-pack/agentbeat/build/distributions/**/* - "x-pack/agentbeat/build/*.xml" - "x-pack/agentbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/agentbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true retry: automatic: - limit: 1 diff --git a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml index 88dfb94bfb3d..7cebeab4787d 100644 --- a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml @@ -89,12 +89,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Build Tests (Module)" @@ -114,12 +108,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: RHEL9 Unit Tests" @@ -141,12 +129,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2022 Unit Tests" @@ -168,12 +150,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - 
test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2016 Unit Tests" @@ -200,12 +176,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2019 Unit Tests" @@ -227,12 +197,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 10 Unit Tests" @@ -254,12 +218,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 11 Unit Tests" @@ -285,12 +243,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: macOS x86_64 Unit Tests" @@ -311,12 +263,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: macOS arm64 Unit Tests" @@ -336,12 +282,6 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: 
"x-pack/auditbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml index 12ce197fbb69..54b3451b23e0 100644 --- a/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml @@ -76,12 +76,6 @@ steps: artifact_paths: - "x-pack/dockerlogbeat/build/*.xml" - "x-pack/dockerlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/dockerlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/dockerlogbeat: Ubuntu x86_64 Unit Tests" @@ -102,12 +96,6 @@ steps: artifact_paths: - "x-pack/dockerlogbeat/build/*.xml" - "x-pack/dockerlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/dockerlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/dockerlogbeat: Go Integration Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.filebeat.yml b/.buildkite/x-pack/pipeline.xpack.filebeat.yml index 91425933abee..57b15927d617 100644 --- a/.buildkite/x-pack/pipeline.xpack.filebeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.filebeat.yml @@ -86,12 +86,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: Ubuntu x86_64 Unit Tests" @@ -111,12 +105,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: 
"x-pack/filebeat: Go Integration Tests" @@ -136,12 +124,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: Python Integration Tests" @@ -163,12 +145,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2022 Unit Tests" @@ -190,12 +166,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2016 Unit Tests" @@ -222,12 +192,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2019 Unit Tests" @@ -249,12 +213,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 10 Unit Tests" @@ -276,12 +234,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 11 Unit Tests" @@ -306,12 +258,6 @@ 
steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: Ubuntu arm64 Unit Tests" @@ -332,12 +278,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: macOS x86_64 Unit Tests" @@ -358,12 +298,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: macOS arm64 Unit Tests" @@ -392,12 +326,6 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/filebeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/filebeat: AWS Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml index 30d98bec3509..414eeb06e752 100644 --- a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml @@ -94,12 +94,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Ubuntu x86_64 Unit Tests" @@ -123,12 +117,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - 
test-collector#v1.10.2: - files: "x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Go Integration Tests" @@ -150,12 +138,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2016 Unit Tests" @@ -177,12 +159,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2022 Unit Tests" @@ -209,12 +185,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 10 Unit Tests" @@ -236,12 +206,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 11 Unit Tests" @@ -263,12 +227,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2019 Unit Tests" @@ -295,12 +253,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: 
"x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: macOS x86_64 Extended Tests" @@ -322,12 +274,6 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/heartbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/heartbeat: macOS arm64 Extended Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.libbeat.yml b/.buildkite/x-pack/pipeline.xpack.libbeat.yml index 0a6993c733e9..431ae1aed819 100644 --- a/.buildkite/x-pack/pipeline.xpack.libbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.libbeat.yml @@ -82,12 +82,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/libbeat: Ubuntu x86_64 Unit Tests" @@ -107,12 +101,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/libbeat: Go Integration Tests" @@ -132,12 +120,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/libbeat: Python Integration Tests" @@ -159,12 +141,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: 
context: "x-pack/libbeat: Win 2016 Unit Tests" @@ -186,12 +162,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 2022 Unit Tests" @@ -218,12 +188,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 10 Unit Tests" @@ -245,12 +209,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 11 Unit Tests" @@ -272,12 +230,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/libbeat: Win 2019 Unit Tests" @@ -302,12 +254,6 @@ steps: artifact_paths: - "x-pack/libbeat/build/*.xml" - "x-pack/libbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/libbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/libbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml index abf627504510..e616dd053897 100644 --- a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml @@ -87,12 +87,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - 
"x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Ubuntu x86_64 Unit Tests" @@ -114,12 +108,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Go Integration Tests (Module)" @@ -141,12 +129,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Python Integration Tests (Module)" @@ -168,12 +150,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2016 Unit Tests" @@ -195,12 +171,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2022 Unit Tests" @@ -227,12 +197,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 10 Unit Tests" @@ -254,12 +218,6 @@ steps: artifact_paths: - 
"x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 11 Unit Tests" @@ -281,12 +239,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2019 Unit Tests" @@ -311,12 +263,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: macOS x86_64 Unit Tests" @@ -338,12 +284,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: macOS arm64 Unit Tests" @@ -370,12 +310,6 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/metricbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/metricbeat: AWS Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml index 6f58f8b20681..9c397f95d79a 100644 --- a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml @@ -82,12 +82,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - plugins: - - 
test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Ubuntu x86_64 Unit Tests" @@ -107,12 +101,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Go Integration Tests" @@ -134,12 +122,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win 2016 Unit Tests" @@ -161,12 +143,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win-2022 Unit Tests" @@ -193,12 +169,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win 10 Unit Tests" @@ -220,12 +190,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win 11 Unit Tests" @@ -247,12 +211,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - 
plugins: - - test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: Win 2019 Unit Tests" @@ -276,12 +234,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: macOS x86_64 Unit Tests" @@ -300,12 +252,6 @@ steps: artifact_paths: - "x-pack/osquerybeat/build/*.xml" - "x-pack/osquerybeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/osquerybeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/osquerybeat: macOS arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml index 09279478de7e..abf9950b9272 100644 --- a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml @@ -86,12 +86,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Ubuntu x86_64 Unit Tests" @@ -111,12 +105,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Ubuntu x86_64 System Tests" @@ -136,12 +124,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: 
"x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: RHEL9 Unit Tests" @@ -163,12 +145,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2016 Unit Tests" @@ -190,12 +166,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2022 Unit Tests" @@ -218,12 +188,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2022 System Tests" @@ -250,12 +214,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 10 Unit Tests" @@ -277,12 +235,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 11 Unit Tests" @@ -304,12 +256,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: 
"x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2019 Unit Tests" @@ -332,12 +278,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 10 System Tests" @@ -363,12 +303,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Ubuntu arm64 Unit Tests" @@ -390,12 +324,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: macOS x86_64 Unit Tests" @@ -417,12 +345,6 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/packetbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/packetbeat: macOS arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml index 398bf10dec50..f0a1b30c2536 100644 --- a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml @@ -79,12 +79,6 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - 
debug: true notify: - github_commit_status: context: "x-pack/winlogbeat Win 2019 Unit Tests" @@ -106,12 +100,6 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2016 Unit Tests" @@ -133,12 +121,6 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2022 Unit Tests" @@ -165,12 +147,6 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 10 Unit Tests" @@ -192,12 +168,6 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 11 Unit Tests" @@ -219,12 +189,6 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" - plugins: - - test-collector#v1.10.2: - files: "x-pack/winlogbeat/build/TEST-*.xml" - format: "junit" - branches: "main" - debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2019 Unit Tests" From 13d71a6a2d0ad0217a8aae6ae0ff20a68fef34f4 Mon Sep 17 00:00:00 2001 From: "Alex K." 
<8418476+fearful-symmetry@users.noreply.github.com> Date: Wed, 9 Oct 2024 07:27:39 -0700 Subject: [PATCH 19/90] Fix cgroup helper init in `add_processor_metadata` and `add_docker_metadata` processors (#41108) * only initialize cgroup reader once * docs * remove old struct fields --------- Co-authored-by: Pierre HILBERT --- .../add_docker_metadata.go | 25 ++++++---- .../add_docker_metadata_test.go | 49 +++++++++++-------- .../add_process_metadata.go | 14 ++++-- .../add_process_metadata_test.go | 28 +++++++++-- .../gosigar_cid_provider.go | 10 ++-- libbeat/processors/cgroups.go | 32 ++++++++++++ 6 files changed, 114 insertions(+), 44 deletions(-) create mode 100644 libbeat/processors/cgroups.go diff --git a/libbeat/processors/add_docker_metadata/add_docker_metadata.go b/libbeat/processors/add_docker_metadata/add_docker_metadata.go index d670713894da..8c6b9d146b17 100644 --- a/libbeat/processors/add_docker_metadata/add_docker_metadata.go +++ b/libbeat/processors/add_docker_metadata/add_docker_metadata.go @@ -47,9 +47,11 @@ const ( cgroupCacheExpiration = 5 * time.Minute ) -// processGroupPaths returns the cgroups associated with a process. This enables +// initCgroupPaths initializes a new cgroup reader. This enables // unit testing by allowing us to stub the OS interface. -var processCgroupPaths = cgroup.ProcessCgroupPaths +var initCgroupPaths processors.InitCgroupHandler = func(rootfsMountpoint resolve.Resolver, ignoreRootCgroups bool) (processors.CGReader, error) { + return cgroup.NewReader(rootfsMountpoint, ignoreRootCgroups) +} func init() { processors.RegisterPlugin(processorName, New) @@ -61,11 +63,11 @@ type addDockerMetadata struct { fields []string sourceProcessor beat.Processor - pidFields []string // Field names that contain PIDs. - cgroups *common.Cache // Cache of PID (int) to cgropus (map[string]string). - hostFS resolve.Resolver // Directory where /proc is found - dedot bool // If set to true, replace dots in labels with `_`. 
- dockerAvailable bool // If Docker exists in env, then it is set to true + pidFields []string // Field names that contain PIDs. + cgroups *common.Cache // Cache of PID (int) to cgropus (map[string]string). + dedot bool // If set to true, replace dots in labels with `_`. + dockerAvailable bool // If Docker exists in env, then it is set to true + cgreader processors.CGReader } const selector = "add_docker_metadata" @@ -110,15 +112,20 @@ func buildDockerMetadataProcessor(log *logp.Logger, cfg *conf.C, watcherConstruc } } + reader, err := initCgroupPaths(resolve.NewTestResolver(config.HostFS), false) + if err != nil { + return nil, fmt.Errorf("error creating cgroup reader: %w", err) + } + return &addDockerMetadata{ log: log, watcher: watcher, fields: config.Fields, sourceProcessor: sourceProcessor, pidFields: config.MatchPIDs, - hostFS: resolve.NewTestResolver(config.HostFS), dedot: config.DeDot, dockerAvailable: dockerAvailable, + cgreader: reader, }, nil } @@ -277,7 +284,7 @@ func (d *addDockerMetadata) getProcessCgroups(pid int) (cgroup.PathList, error) return cgroups, nil } - cgroups, err := processCgroupPaths(d.hostFS, pid) + cgroups, err := d.cgreader.ProcessCgroupPaths(pid) if err != nil { return cgroups, fmt.Errorf("failed to read cgroups for pid=%v: %w", pid, err) } diff --git a/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go b/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go index 2b6663f71dc1..dc3d5e3003c6 100644 --- a/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go +++ b/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/elastic-agent-autodiscover/bus" "github.com/elastic/elastic-agent-autodiscover/docker" "github.com/elastic/elastic-agent-libs/config" @@ -37,29 +38,35 @@ import ( 
"github.com/elastic/elastic-agent-system-metrics/metric/system/resolve" ) +type testCGReader struct { +} + +func (r testCGReader) ProcessCgroupPaths(pid int) (cgroup.PathList, error) { + switch pid { + case 1000: + return cgroup.PathList{ + V1: map[string]cgroup.ControllerPath{ + "cpu": {ControllerPath: "/docker/8c147fdfab5a2608fe513d10294bf77cb502a231da9725093a155bd25cd1f14b", IsV2: false}, + }, + }, nil + case 2000: + return cgroup.PathList{ + V1: map[string]cgroup.ControllerPath{ + "memory": {ControllerPath: "/user.slice", IsV2: false}, + }, + }, nil + case 3000: + // Parser error (hopefully this never happens). + return cgroup.PathList{}, fmt.Errorf("cgroup parse failure") + default: + return cgroup.PathList{}, os.ErrNotExist + } +} + func init() { // Stub out the procfs. - processCgroupPaths = func(_ resolve.Resolver, pid int) (cgroup.PathList, error) { - - switch pid { - case 1000: - return cgroup.PathList{ - V1: map[string]cgroup.ControllerPath{ - "cpu": {ControllerPath: "/docker/8c147fdfab5a2608fe513d10294bf77cb502a231da9725093a155bd25cd1f14b", IsV2: false}, - }, - }, nil - case 2000: - return cgroup.PathList{ - V1: map[string]cgroup.ControllerPath{ - "memory": {ControllerPath: "/user.slice", IsV2: false}, - }, - }, nil - case 3000: - // Parser error (hopefully this never happens). 
- return cgroup.PathList{}, fmt.Errorf("cgroup parse failure") - default: - return cgroup.PathList{}, os.ErrNotExist - } + initCgroupPaths = func(_ resolve.Resolver, _ bool) (processors.CGReader, error) { + return testCGReader{}, nil } } diff --git a/libbeat/processors/add_process_metadata/add_process_metadata.go b/libbeat/processors/add_process_metadata/add_process_metadata.go index 8bb8ecea5a97..6bbd1c008977 100644 --- a/libbeat/processors/add_process_metadata/add_process_metadata.go +++ b/libbeat/processors/add_process_metadata/add_process_metadata.go @@ -54,7 +54,10 @@ var ( procCache = newProcessCache(cacheExpiration, cacheCapacity, cacheEvictionEffort, gosysinfoProvider{}) - processCgroupPaths = cgroup.ProcessCgroupPaths + // cgroups resolver, turned to a stub function to make testing easier. + initCgroupPaths processors.InitCgroupHandler = func(rootfsMountpoint resolve.Resolver, ignoreRootCgroups bool) (processors.CGReader, error) { + return cgroup.NewReader(rootfsMountpoint, ignoreRootCgroups) + } instanceID atomic.Uint32 ) @@ -160,6 +163,11 @@ func newProcessMetadataProcessorWithProvider(config config, provider processMeta } } + reader, err := initCgroupPaths(resolve.NewTestResolver(config.HostPath), false) + if err != nil { + return nil, fmt.Errorf("error creating cgroup reader: %w", err) + } + // don't use cgroup.ProcessCgroupPaths to save it from doing the work when container id disabled if ok := containsValue(mappings, "container.id"); ok { if withCache && config.CgroupCacheExpireTime != 0 { @@ -170,9 +178,9 @@ func newProcessMetadataProcessorWithProvider(config config, provider processMeta p.cgroupsCache = common.NewCacheWithRemovalListener(config.CgroupCacheExpireTime, 100, evictionListener) p.cgroupsCache.StartJanitor(config.CgroupCacheExpireTime) - p.cidProvider = newCidProvider(resolve.NewTestResolver(config.HostPath), config.CgroupPrefixes, config.CgroupRegex, processCgroupPaths, p.cgroupsCache) + p.cidProvider = 
newCidProvider(config.CgroupPrefixes, config.CgroupRegex, reader, p.cgroupsCache) } else { - p.cidProvider = newCidProvider(resolve.NewTestResolver(config.HostPath), config.CgroupPrefixes, config.CgroupRegex, processCgroupPaths, nil) + p.cidProvider = newCidProvider(config.CgroupPrefixes, config.CgroupRegex, reader, nil) } } diff --git a/libbeat/processors/add_process_metadata/add_process_metadata_test.go b/libbeat/processors/add_process_metadata/add_process_metadata_test.go index 977a554f3201..bd761b5e3a58 100644 --- a/libbeat/processors/add_process_metadata/add_process_metadata_test.go +++ b/libbeat/processors/add_process_metadata/add_process_metadata_test.go @@ -31,6 +31,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/capabilities" + "github.com/elastic/beats/v7/libbeat/processors" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" @@ -38,6 +39,20 @@ import ( "github.com/elastic/elastic-agent-system-metrics/metric/system/resolve" ) +type testCGRsolver struct { + res func(pid int) (cgroup.PathList, error) +} + +func (t testCGRsolver) ProcessCgroupPaths(pid int) (cgroup.PathList, error) { + return t.res(pid) +} + +func newCGHandlerBuilder(handler testCGRsolver) processors.InitCgroupHandler { + return func(_ resolve.Resolver, _ bool) (processors.CGReader, error) { + return handler, nil + } +} + func TestAddProcessMetadata(t *testing.T) { logp.TestingSetup(logp.WithSelectors(processorName)) @@ -90,7 +105,7 @@ func TestAddProcessMetadata(t *testing.T) { } // mock of the cgroup processCgroupPaths - processCgroupPaths = func(_ resolve.Resolver, pid int) (cgroup.PathList, error) { + processCgroupPaths := func(pid int) (cgroup.PathList, error) { testMap := map[int]cgroup.PathList{ 1: { V1: map[string]cgroup.ControllerPath{ @@ -135,6 +150,7 @@ func TestAddProcessMetadata(t *testing.T) { return testMap[pid], nil } + 
initCgroupPaths = newCGHandlerBuilder(testCGRsolver{res: processCgroupPaths}) for _, test := range []struct { description string @@ -884,7 +900,7 @@ func TestUsingCache(t *testing.T) { selfPID := os.Getpid() // mock of the cgroup processCgroupPaths - processCgroupPaths = func(_ resolve.Resolver, pid int) (cgroup.PathList, error) { + processCgroupPaths := func(pid int) (cgroup.PathList, error) { testStruct := cgroup.PathList{ V1: map[string]cgroup.ControllerPath{ "cpu": {ControllerPath: "/kubepods/besteffort/pod665fb997-575b-11ea-bfce-080027421ddf/b5285682fba7449c86452b89a800609440ecc88a7ba5f2d38bedfb85409b30b1"}, @@ -909,7 +925,7 @@ func TestUsingCache(t *testing.T) { // testMap := return testMap[pid], nil } - + initCgroupPaths = newCGHandlerBuilder(testCGRsolver{res: processCgroupPaths}) config, err := conf.NewConfigFrom(mapstr.M{ "match_pids": []string{"system.process.ppid"}, "include_fields": []string{"container.id", "process.env"}, @@ -1202,7 +1218,7 @@ func TestPIDToInt(t *testing.T) { } func TestV2CID(t *testing.T) { - processCgroupPaths = func(_ resolve.Resolver, _ int) (cgroup.PathList, error) { + processCgroupPaths := func(_ int) (cgroup.PathList, error) { testMap := cgroup.PathList{ V1: map[string]cgroup.ControllerPath{ "cpu": {IsV2: true, ControllerPath: "system.slice/docker-2dcbab615aebfa9313feffc5cfdacd381543cfa04c6be3f39ac656e55ef34805.scope"}, @@ -1210,7 +1226,9 @@ func TestV2CID(t *testing.T) { } return testMap, nil } - provider := newCidProvider(resolve.NewTestResolver(""), nil, defaultCgroupRegex, processCgroupPaths, nil) + resolver := testCGRsolver{res: processCgroupPaths} + initCgroupPaths = newCGHandlerBuilder(resolver) + provider := newCidProvider(nil, defaultCgroupRegex, resolver, nil) result, err := provider.GetCid(1) assert.NoError(t, err) assert.Equal(t, "2dcbab615aebfa9313feffc5cfdacd381543cfa04c6be3f39ac656e55ef34805", result) diff --git a/libbeat/processors/add_process_metadata/gosigar_cid_provider.go 
b/libbeat/processors/add_process_metadata/gosigar_cid_provider.go index 00c46f2b8bf3..d01e620c7c53 100644 --- a/libbeat/processors/add_process_metadata/gosigar_cid_provider.go +++ b/libbeat/processors/add_process_metadata/gosigar_cid_provider.go @@ -26,9 +26,9 @@ import ( "strings" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-system-metrics/metric/system/cgroup" - "github.com/elastic/elastic-agent-system-metrics/metric/system/resolve" ) const ( @@ -37,10 +37,9 @@ const ( type gosigarCidProvider struct { log *logp.Logger - hostPath resolve.Resolver cgroupPrefixes []string cgroupRegex *regexp.Regexp - processCgroupPaths func(resolve.Resolver, int) (cgroup.PathList, error) + processCgroupPaths processors.CGReader pidCidCache *common.Cache } @@ -70,10 +69,9 @@ func (p gosigarCidProvider) GetCid(pid int) (result string, err error) { return cid, nil } -func newCidProvider(hostPath resolve.Resolver, cgroupPrefixes []string, cgroupRegex *regexp.Regexp, processCgroupPaths func(resolve.Resolver, int) (cgroup.PathList, error), pidCidCache *common.Cache) gosigarCidProvider { +func newCidProvider(cgroupPrefixes []string, cgroupRegex *regexp.Regexp, processCgroupPaths processors.CGReader, pidCidCache *common.Cache) gosigarCidProvider { return gosigarCidProvider{ log: logp.NewLogger(providerName), - hostPath: hostPath, cgroupPrefixes: cgroupPrefixes, cgroupRegex: cgroupRegex, processCgroupPaths: processCgroupPaths, @@ -84,7 +82,7 @@ func newCidProvider(hostPath resolve.Resolver, cgroupPrefixes []string, cgroupRe // getProcessCgroups returns a mapping of cgroup subsystem name to path. It // returns an error if it failed to retrieve the cgroup info. 
func (p gosigarCidProvider) getProcessCgroups(pid int) (cgroup.PathList, error) { - pathList, err := p.processCgroupPaths(p.hostPath, pid) + pathList, err := p.processCgroupPaths.ProcessCgroupPaths(pid) if err != nil { var pathError *fs.PathError if errors.As(err, &pathError) { diff --git a/libbeat/processors/cgroups.go b/libbeat/processors/cgroups.go new file mode 100644 index 000000000000..8e54ae5535bf --- /dev/null +++ b/libbeat/processors/cgroups.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package processors + +import ( + "github.com/elastic/elastic-agent-system-metrics/metric/system/cgroup" + "github.com/elastic/elastic-agent-system-metrics/metric/system/resolve" +) + +// InitCgroupHandler is a type for creating stubs for the cgroup resolver. Used primarily for testing. +type InitCgroupHandler = func(rootfsMountpoint resolve.Resolver, ignoreRootCgroups bool) (CGReader, error) + +// CGReader wraps the group Reader.ProcessCgroupPaths() call, this allows us to +// set different cgroups readers for testing. 
+type CGReader interface { + ProcessCgroupPaths(pid int) (cgroup.PathList, error) +} From e580831bf2434a1d0425cbe1d71b792c85281990 Mon Sep 17 00:00:00 2001 From: Alexandros Sapranidis Date: Thu, 10 Oct 2024 14:40:01 +0300 Subject: [PATCH 20/90] Add test collector for Beats projects (#41184) Signed-off-by: Alexandros Sapranidis --- .buildkite/auditbeat/auditbeat-pipeline.yml | 72 +++++++++++++++++ .buildkite/filebeat/filebeat-pipeline.yml | 66 ++++++++++++++++ .buildkite/hooks/pre-command | 17 ++++ .buildkite/libbeat/pipeline.libbeat.yml | 24 ++++++ .buildkite/metricbeat/pipeline.yml | 60 ++++++++++++++ .buildkite/packetbeat/pipeline.packetbeat.yml | 60 ++++++++++++++ .../x-pack/pipeline.xpack.agentbeat.yml | 6 ++ .../x-pack/pipeline.xpack.auditbeat.yml | 60 ++++++++++++++ .buildkite/x-pack/pipeline.xpack.filebeat.yml | 72 +++++++++++++++++ .../x-pack/pipeline.xpack.heartbeat.yml | 54 +++++++++++++ .../x-pack/pipeline.xpack.metricbeat.yml | 66 ++++++++++++++++ .../x-pack/pipeline.xpack.packetbeat.yml | 78 +++++++++++++++++++ .../x-pack/pipeline.xpack.winlogbeat.yml | 36 +++++++++ 13 files changed, 671 insertions(+) diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index d11f067c4815..e083df17749b 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -86,6 +86,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu x86_64 Unit Tests" @@ -105,6 +111,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: RHEL9 Unit Tests" @@ -125,6 +137,12 @@ 
steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 2016 Unit Tests" @@ -145,6 +163,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 2022 Unit Tests" @@ -185,6 +209,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu x86_64 Integration Tests" @@ -206,6 +236,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu arm64 Integration Tests" @@ -227,6 +263,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Ubuntu arm64 Unit Tests" @@ -247,6 +289,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: macOS x86_64 Unit Tests" @@ -267,6 +315,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + 
branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: macOS arm64 Unit Tests" @@ -293,6 +347,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 2019 Unit Tests" @@ -314,6 +374,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 10 Unit Tests" @@ -335,6 +401,12 @@ steps: artifact_paths: - "auditbeat/build/*.xml" - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "auditbeat: Win 11 Unit Tests" diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index f92730b158cb..d882cf1c9340 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -87,6 +87,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Ubuntu x86_64 Unit Tests" @@ -105,6 +111,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Go Integration Tests" @@ -123,6 +135,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: 
"filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Python Integration Tests" @@ -144,6 +162,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 2016 Unit Tests" @@ -165,6 +189,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 2022 Unit Tests" @@ -191,6 +221,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: macOS x86_64 Unit Tests" @@ -212,6 +248,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: macOS arm64 Unit Tests" @@ -232,6 +274,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Ubuntu arm64 Unit Tests" @@ -258,6 +306,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 2019 Unit Tests" @@ -279,6 +333,12 @@ steps: artifact_paths: - 
"filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 11 Unit Tests" @@ -300,6 +360,12 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "filebeat: Win 10 Unit Tests" diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index b905f053121b..5718d97879e1 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -18,6 +18,23 @@ if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-packetbeat" && "$BUILDKITE_STEP export PRIVATE_CI_GCS_CREDENTIALS_SECRET fi +if [[ "$BUILDKITE_PIPELINE_SLUG" == "auditbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-agentbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-auditbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-filebeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-heartbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-metricbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-packetbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-winlogbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "filebeat" ]]; then + echo "--- Prepare BK test analytics token :vault:" + BUILDKITE_ANALYTICS_TOKEN=$(vault kv get -field token kv/ci-shared/platform-ingest/buildkite_beats_analytics_token) + export BUILDKITE_ANALYTICS_TOKEN +fi + CPU_ARCH=$(uname -m) PLATFORM_TYPE=$(uname) diff --git a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml index b19cd0ae7517..67f7628861e5 100644 --- 
a/.buildkite/libbeat/pipeline.libbeat.yml +++ b/.buildkite/libbeat/pipeline.libbeat.yml @@ -77,6 +77,12 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "libbeat/build/TEST-*-unit.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "libbeat: Ubuntu x86_64 Unit Tests" @@ -97,6 +103,12 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "libbeat: Go Integration Tests" @@ -117,6 +129,12 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "libbeat: Python Integration Tests" @@ -180,6 +198,12 @@ steps: artifact_paths: - "libbeat/build/*.xml" - "libbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "libbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "libbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 3ec9af58bf64..a23fc121d38e 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -91,6 +91,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Ubuntu x86_64 Unit Tests" @@ -113,6 +119,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + 
debug: true notify: - github_commit_status: context: "metricbeat: Go Integration Tests (Module)" @@ -135,6 +147,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Python Integration Tests" @@ -173,6 +191,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 2016 Unit Tests" @@ -194,6 +218,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 2022 Unit Tests" @@ -220,6 +250,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 10 Unit Tests" @@ -241,6 +277,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 11 Unit Tests" @@ -262,6 +304,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: Win 2019 Unit Tests" @@ -287,6 +335,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - 
"metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: macOS x86_64 Unit Tests" @@ -309,6 +363,12 @@ steps: artifact_paths: - "metricbeat/build/*.xml" - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "metricbeat: macOS arm64 Unit Tests" diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml b/.buildkite/packetbeat/pipeline.packetbeat.yml index 3237644dfabb..753dd182548e 100644 --- a/.buildkite/packetbeat/pipeline.packetbeat.yml +++ b/.buildkite/packetbeat/pipeline.packetbeat.yml @@ -85,6 +85,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Ubuntu x86_64 Unit Tests" @@ -103,6 +109,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: RHEL9 Unit Tests" @@ -123,6 +135,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 2016 Unit Tests" @@ -143,6 +161,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 2022 Unit 
Tests" @@ -168,6 +192,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 10 Unit Tests" @@ -189,6 +219,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 11 Unit Tests" @@ -210,6 +246,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Win 2019 Unit Tests" @@ -235,6 +277,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: macOS x86_64 Unit Tests" @@ -256,6 +304,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: macOS arm64 Unit Tests" @@ -276,6 +330,12 @@ steps: artifact_paths: - "packetbeat/build/*.xml" - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "packetbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml index 70aa4362b865..ef7cb1598aa4 100644 --- 
a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml @@ -80,6 +80,12 @@ steps: - x-pack/agentbeat/build/distributions/**/* - "x-pack/agentbeat/build/*.xml" - "x-pack/agentbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/agentbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true retry: automatic: - limit: 1 diff --git a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml index 7cebeab4787d..88dfb94bfb3d 100644 --- a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml @@ -89,6 +89,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Build Tests (Module)" @@ -108,6 +114,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: RHEL9 Unit Tests" @@ -129,6 +141,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2022 Unit Tests" @@ -150,6 +168,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2016 Unit Tests" @@ -176,6 +200,12 @@ steps: 
artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 2019 Unit Tests" @@ -197,6 +227,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 10 Unit Tests" @@ -218,6 +254,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Win 11 Unit Tests" @@ -243,6 +285,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: macOS x86_64 Unit Tests" @@ -263,6 +311,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: macOS arm64 Unit Tests" @@ -282,6 +336,12 @@ steps: artifact_paths: - "x-pack/auditbeat/build/*.xml" - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/auditbeat: Ubuntu arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.filebeat.yml 
b/.buildkite/x-pack/pipeline.xpack.filebeat.yml index 57b15927d617..91425933abee 100644 --- a/.buildkite/x-pack/pipeline.xpack.filebeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.filebeat.yml @@ -86,6 +86,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Ubuntu x86_64 Unit Tests" @@ -105,6 +111,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Go Integration Tests" @@ -124,6 +136,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Python Integration Tests" @@ -145,6 +163,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2022 Unit Tests" @@ -166,6 +190,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2016 Unit Tests" @@ -192,6 +222,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: 
"x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 2019 Unit Tests" @@ -213,6 +249,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 10 Unit Tests" @@ -234,6 +276,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Win 11 Unit Tests" @@ -258,6 +306,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: Ubuntu arm64 Unit Tests" @@ -278,6 +332,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: macOS x86_64 Unit Tests" @@ -298,6 +358,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: macOS arm64 Unit Tests" @@ -326,6 +392,12 @@ steps: artifact_paths: - "x-pack/filebeat/build/*.xml" - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + 
branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/filebeat: AWS Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml index 414eeb06e752..30d98bec3509 100644 --- a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml @@ -94,6 +94,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Ubuntu x86_64 Unit Tests" @@ -117,6 +123,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Go Integration Tests" @@ -138,6 +150,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2016 Unit Tests" @@ -159,6 +177,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2022 Unit Tests" @@ -185,6 +209,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 10 Unit Tests" @@ 
-206,6 +236,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 11 Unit Tests" @@ -227,6 +263,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: Win 2019 Unit Tests" @@ -253,6 +295,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: macOS x86_64 Extended Tests" @@ -274,6 +322,12 @@ steps: artifact_paths: - "x-pack/heartbeat/build/*.xml" - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/heartbeat: macOS arm64 Extended Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml index e616dd053897..abf627504510 100644 --- a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml @@ -87,6 +87,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Ubuntu x86_64 Unit Tests" @@ -108,6 +114,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - 
"x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Go Integration Tests (Module)" @@ -129,6 +141,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Python Integration Tests (Module)" @@ -150,6 +168,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2016 Unit Tests" @@ -171,6 +195,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2022 Unit Tests" @@ -197,6 +227,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 10 Unit Tests" @@ -218,6 +254,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 11 Unit Tests" @@ -239,6 +281,12 @@ steps: artifact_paths: - 
"x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: Win 2019 Unit Tests" @@ -263,6 +311,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: macOS x86_64 Unit Tests" @@ -284,6 +338,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: macOS arm64 Unit Tests" @@ -310,6 +370,12 @@ steps: artifact_paths: - "x-pack/metricbeat/build/*.xml" - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/metricbeat: AWS Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml index abf9950b9272..09279478de7e 100644 --- a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml @@ -86,6 +86,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Ubuntu x86_64 Unit Tests" @@ -105,6 +111,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - 
test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Ubuntu x86_64 System Tests" @@ -124,6 +136,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: RHEL9 Unit Tests" @@ -145,6 +163,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2016 Unit Tests" @@ -166,6 +190,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2022 Unit Tests" @@ -188,6 +218,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2022 System Tests" @@ -214,6 +250,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 10 Unit Tests" @@ -235,6 +277,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - 
test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 11 Unit Tests" @@ -256,6 +304,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 2019 Unit Tests" @@ -278,6 +332,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Win 10 System Tests" @@ -303,6 +363,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: Ubuntu arm64 Unit Tests" @@ -324,6 +390,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: macOS x86_64 Unit Tests" @@ -345,6 +417,12 @@ steps: artifact_paths: - "x-pack/packetbeat/build/*.xml" - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/packetbeat: macOS arm64 Unit Tests" diff --git a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml index 
f0a1b30c2536..398bf10dec50 100644 --- a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml @@ -79,6 +79,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat Win 2019 Unit Tests" @@ -100,6 +106,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2016 Unit Tests" @@ -121,6 +133,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2022 Unit Tests" @@ -147,6 +165,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 10 Unit Tests" @@ -168,6 +192,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 11 Unit Tests" @@ -189,6 +219,12 @@ steps: artifact_paths: - "x-pack/winlogbeat/build/*.xml" - "x-pack/winlogbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/winlogbeat/build/TEST-*.xml" + format: 
"junit" + branches: "main" + debug: true notify: - github_commit_status: context: "x-pack/winlogbeat: Win 2019 Unit Tests" From f369a281c19a1343ec2a93bdfea8dc324e13aa2f Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Thu, 10 Oct 2024 11:11:02 -0300 Subject: [PATCH 21/90] fix: restore stderr logs when running on docker or systemd environments This PR updates elastic-agent-libs to the latest version in order to fix https://github.com/elastic/beats/issues/41118. --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 67fdb6be883d..d6b9ec916077 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -13019,11 +13019,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.11.0 +Version: v0.12.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.11.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.12.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index e20e3b0fc12c..b7d4b5ecb745 100644 --- a/go.mod +++ b/go.mod @@ -189,7 +189,7 @@ require ( github.com/elastic/bayeux v1.0.5 github.com/elastic/ebpfevents v0.6.0 github.com/elastic/elastic-agent-autodiscover v0.8.2 - github.com/elastic/elastic-agent-libs v0.11.0 + github.com/elastic/elastic-agent-libs v0.12.1 github.com/elastic/elastic-agent-system-metrics v0.11.1 github.com/elastic/go-elasticsearch/v8 v8.14.0 github.com/elastic/go-sfdc v0.0.0-20240621062639-bcc8456508ff diff --git a/go.sum b/go.sum index 8467b9cf3d3e..8e41f529a64c 100644 --- a/go.sum +++ b/go.sum @@ -338,8 +338,8 @@ github.com/elastic/elastic-agent-autodiscover v0.8.2 h1:Fs2FhR33AMBPfm5/jz4drVza 
github.com/elastic/elastic-agent-autodiscover v0.8.2/go.mod h1:VZnU53EVaFTxR8Xf6YsLN8FHD5DKQzHSPlKax9/4w+o= github.com/elastic/elastic-agent-client/v7 v7.15.0 h1:nDB7v8TBoNuD6IIzC3z7Q0y+7bMgXoT2DsHfolO2CHE= github.com/elastic/elastic-agent-client/v7 v7.15.0/go.mod h1:6h+f9QdIr3GO2ODC0Y8+aEXRwzbA5W4eV4dd/67z7nI= -github.com/elastic/elastic-agent-libs v0.11.0 h1:m9rnNE3BkBF2XJoqubqEbu/kbtKEBZ7pHCjDlxfVRH0= -github.com/elastic/elastic-agent-libs v0.11.0/go.mod h1:5CR02awPrBr+tfmjBBK+JI+dMmHNQjpVY24J0wjbC7M= +github.com/elastic/elastic-agent-libs v0.12.1 h1:5jkxMx15Bna8cq7/Sz/XUIVUXfNWiJ80iSk4ICQ7KJ0= +github.com/elastic/elastic-agent-libs v0.12.1/go.mod h1:5CR02awPrBr+tfmjBBK+JI+dMmHNQjpVY24J0wjbC7M= github.com/elastic/elastic-agent-system-metrics v0.11.1 h1:BxViQHnqxvvi/65rj3mGwG6Eto6ldFCTnuDTUJnakaU= github.com/elastic/elastic-agent-system-metrics v0.11.1/go.mod h1:3QiMu9wTKJFvpCN+5klgGqasTMNKJbgY3xcoN1KQXJk= github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= From 648ec22dc68296de7a31f1fc73b2b0e36b259afd Mon Sep 17 00:00:00 2001 From: "Alex K." 
<8418476+fearful-symmetry@users.noreply.github.com> Date: Thu, 10 Oct 2024 10:39:41 -0700 Subject: [PATCH 22/90] Add tests, fallback properly if we can't init the cgroups (#41189) * add tests, fallback properly if we can't init the cgroups * linter * fix test --- .../add_docker_metadata.go | 5 +++- .../add_docker_metadata_test.go | 27 +++++++++++++++++++ .../add_process_metadata.go | 2 +- .../add_process_metadata_test.go | 17 ++++++++++++ .../gosigar_cid_provider.go | 13 +++------ 5 files changed, 53 insertions(+), 11 deletions(-) diff --git a/libbeat/processors/add_docker_metadata/add_docker_metadata.go b/libbeat/processors/add_docker_metadata/add_docker_metadata.go index 8c6b9d146b17..c1b0afeb9fa4 100644 --- a/libbeat/processors/add_docker_metadata/add_docker_metadata.go +++ b/libbeat/processors/add_docker_metadata/add_docker_metadata.go @@ -113,7 +113,7 @@ func buildDockerMetadataProcessor(log *logp.Logger, cfg *conf.C, watcherConstruc } reader, err := initCgroupPaths(resolve.NewTestResolver(config.HostFS), false) - if err != nil { + if err != nil && !errors.Is(err, cgroup.ErrCgroupsMissing) { return nil, fmt.Errorf("error creating cgroup reader: %w", err) } @@ -284,6 +284,9 @@ func (d *addDockerMetadata) getProcessCgroups(pid int) (cgroup.PathList, error) return cgroups, nil } + if d.cgreader == nil { + return cgroups, fs.ErrNotExist + } cgroups, err := d.cgreader.ProcessCgroupPaths(pid) if err != nil { return cgroups, fmt.Errorf("failed to read cgroups for pid=%v: %w", pid, err) diff --git a/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go b/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go index dc3d5e3003c6..6cbf85d235c6 100644 --- a/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go +++ b/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go @@ -26,6 +26,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" 
"github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/processors" @@ -70,6 +71,32 @@ func init() { } } +func TestDefaultProcessorStartup(t *testing.T) { + // set initCgroupPaths to system non-test defaults + initCgroupPaths = func(rootfsMountpoint resolve.Resolver, ignoreRootCgroups bool) (processors.CGReader, error) { + return cgroup.NewReader(rootfsMountpoint, ignoreRootCgroups) + } + + defer func() { + initCgroupPaths = func(_ resolve.Resolver, _ bool) (processors.CGReader, error) { + return testCGReader{}, nil + } + }() + + rawCfg := defaultConfig() + cfg, err := config.NewConfigFrom(rawCfg) + require.NoError(t, err) + + proc, err := buildDockerMetadataProcessor(logp.L(), cfg, docker.NewWatcher) + require.NoError(t, err) + + unwrapped, _ := proc.(*addDockerMetadata) + + // make sure pid readers have been initialized properly + _, err = unwrapped.getProcessCgroups(os.Getpid()) + require.NoError(t, err) +} + func TestInitializationNoDocker(t *testing.T) { var testConfig = config.NewConfig() testConfig.SetString("host", -1, "unix:///var/run42/docker.sock") diff --git a/libbeat/processors/add_process_metadata/add_process_metadata.go b/libbeat/processors/add_process_metadata/add_process_metadata.go index 6bbd1c008977..6e88341993f9 100644 --- a/libbeat/processors/add_process_metadata/add_process_metadata.go +++ b/libbeat/processors/add_process_metadata/add_process_metadata.go @@ -164,7 +164,7 @@ func newProcessMetadataProcessorWithProvider(config config, provider processMeta } reader, err := initCgroupPaths(resolve.NewTestResolver(config.HostPath), false) - if err != nil { + if err != nil && !errors.Is(err, cgroup.ErrCgroupsMissing) { return nil, fmt.Errorf("error creating cgroup reader: %w", err) } diff --git a/libbeat/processors/add_process_metadata/add_process_metadata_test.go b/libbeat/processors/add_process_metadata/add_process_metadata_test.go index bd761b5e3a58..128b70a3d3c5 100644 --- 
a/libbeat/processors/add_process_metadata/add_process_metadata_test.go +++ b/libbeat/processors/add_process_metadata/add_process_metadata_test.go @@ -28,6 +28,7 @@ import ( "unsafe" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/capabilities" @@ -53,6 +54,22 @@ func newCGHandlerBuilder(handler testCGRsolver) processors.InitCgroupHandler { } } +func TestDefaultProcessorStartup(t *testing.T) { + // set initCgroupPaths to system non-test defaults + initCgroupPaths = func(rootfsMountpoint resolve.Resolver, ignoreRootCgroups bool) (processors.CGReader, error) { + return cgroup.NewReader(rootfsMountpoint, ignoreRootCgroups) + } + + proc, err := newProcessMetadataProcessorWithProvider(defaultConfig(), &procCache, false) + require.NoError(t, err) + + // ensure the underlying provider has been initialized properly + unwrapped, _ := proc.(*addProcessMetadata) + metadata, err := unwrapped.provider.GetProcessMetadata(os.Getpid()) + require.NoError(t, err) + require.NotNil(t, metadata) +} + func TestAddProcessMetadata(t *testing.T) { logp.TestingSetup(logp.WithSelectors(processorName)) diff --git a/libbeat/processors/add_process_metadata/gosigar_cid_provider.go b/libbeat/processors/add_process_metadata/gosigar_cid_provider.go index d01e620c7c53..d1f09c5fd855 100644 --- a/libbeat/processors/add_process_metadata/gosigar_cid_provider.go +++ b/libbeat/processors/add_process_metadata/gosigar_cid_provider.go @@ -18,9 +18,7 @@ package add_process_metadata import ( - "errors" "fmt" - "io/fs" "path/filepath" "regexp" "strings" @@ -82,15 +80,12 @@ func newCidProvider(cgroupPrefixes []string, cgroupRegex *regexp.Regexp, process // getProcessCgroups returns a mapping of cgroup subsystem name to path. It // returns an error if it failed to retrieve the cgroup info. 
func (p gosigarCidProvider) getProcessCgroups(pid int) (cgroup.PathList, error) { + //return nil if we aren't supporting cgroups + if p.processCgroupPaths == nil { + return cgroup.PathList{}, nil + } pathList, err := p.processCgroupPaths.ProcessCgroupPaths(pid) if err != nil { - var pathError *fs.PathError - if errors.As(err, &pathError) { - // do no thing when err is nil or when os.PathError happens because the process don't exist, - // or not running in linux system - return cgroup.PathList{}, nil - } - // should never happen return cgroup.PathList{}, fmt.Errorf("failed to read cgroups for pid=%v: %w", pid, err) } From 575c7cc5e1253aa2ca985b62548865bf4467d156 Mon Sep 17 00:00:00 2001 From: kruskall <99559985+kruskall@users.noreply.github.com> Date: Fri, 11 Oct 2024 02:16:43 +0200 Subject: [PATCH 23/90] fix(auditbeat): only create ebpfreader on supported linux platforms (#41040) * fix(auditbeat): only create ebpfreader on supported linux platforms ebpf reader/watcher is only supported on linux amd64/arm64 to avoid compile errors on other systems return an error if the ebpf backend is requested and ebpf is not supported * lint: newEbpfReader -> newEBPFReader --- .../module/file_integrity/ebpfreader_other.go | 30 +++++++++++++++ .../file_integrity/ebpfreader_supported.go | 37 +++++++++++++++++++ .../module/file_integrity/eventreader_ebpf.go | 2 +- .../file_integrity/eventreader_linux.go | 13 +------ libbeat/ebpf/watcher_linux.go | 2 +- 5 files changed, 70 insertions(+), 14 deletions(-) create mode 100644 auditbeat/module/file_integrity/ebpfreader_other.go create mode 100644 auditbeat/module/file_integrity/ebpfreader_supported.go diff --git a/auditbeat/module/file_integrity/ebpfreader_other.go b/auditbeat/module/file_integrity/ebpfreader_other.go new file mode 100644 index 000000000000..0bc7a9b11422 --- /dev/null +++ b/auditbeat/module/file_integrity/ebpfreader_other.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux && !(amd64 || arm64) + +package file_integrity + +import ( + "errors" + + "github.com/elastic/elastic-agent-libs/logp" +) + +func newEBPFReader(c Config, l *logp.Logger) (EventProducer, error) { + return nil, errors.New("ebpf reader is not implemented on this system") +} diff --git a/auditbeat/module/file_integrity/ebpfreader_supported.go b/auditbeat/module/file_integrity/ebpfreader_supported.go new file mode 100644 index 000000000000..186e17b2bc9a --- /dev/null +++ b/auditbeat/module/file_integrity/ebpfreader_supported.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux && (amd64 || arm64) + +package file_integrity + +import "github.com/elastic/elastic-agent-libs/logp" + +func newEBPFReader(c Config, l *logp.Logger) (EventProducer, error) { + paths := make(map[string]struct{}) + for _, p := range c.Paths { + paths[p] = struct{}{} + } + + return &ebpfReader{ + config: c, + log: l, + parsers: FileParsers(c), + paths: paths, + eventC: make(chan Event), + }, nil +} diff --git a/auditbeat/module/file_integrity/eventreader_ebpf.go b/auditbeat/module/file_integrity/eventreader_ebpf.go index 2fb452861e84..8e56866d9b1f 100644 --- a/auditbeat/module/file_integrity/eventreader_ebpf.go +++ b/auditbeat/module/file_integrity/eventreader_ebpf.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-//go:build linux +//go:build linux && (amd64 || arm64) package file_integrity diff --git a/auditbeat/module/file_integrity/eventreader_linux.go b/auditbeat/module/file_integrity/eventreader_linux.go index c6b3d330c772..594fc67e750a 100644 --- a/auditbeat/module/file_integrity/eventreader_linux.go +++ b/auditbeat/module/file_integrity/eventreader_linux.go @@ -41,18 +41,7 @@ func NewEventReader(c Config, logger *logp.Logger) (EventProducer, error) { l := logger.Named("ebpf") l.Info("selected backend: ebpf") - paths := make(map[string]struct{}) - for _, p := range c.Paths { - paths[p] = struct{}{} - } - - return &ebpfReader{ - config: c, - log: l, - parsers: FileParsers(c), - paths: paths, - eventC: make(chan Event), - }, nil + return newEBPFReader(c, l) } if c.Backend == BackendKprobes { diff --git a/libbeat/ebpf/watcher_linux.go b/libbeat/ebpf/watcher_linux.go index e0da448d87a6..7fea4b571c7a 100644 --- a/libbeat/ebpf/watcher_linux.go +++ b/libbeat/ebpf/watcher_linux.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -//go:build linux +//go:build linux && (amd64 || arm64) package ebpf From a25c5a5dd79d92e97e1168b1f233419a847bb2b7 Mon Sep 17 00:00:00 2001 From: subham sarkar Date: Fri, 11 Oct 2024 12:43:59 +0530 Subject: [PATCH 24/90] Bump `github.com/elastic/go-sfdc` to `master@8e176480d727` (#41192) * Bump github.com/elastic/go-sfdc to master@8e176480d727 * Add changelog and update NOTICE --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index edc156862980..d3022fce790f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -173,6 +173,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add backup and delete for AWS S3 polling mode feature back. 
{pull}41071[41071] - Fix a bug in Salesforce input to only handle responses with 200 status code {pull}41015[41015] - Fixed failed job handling and removed false-positive error logs in the GCS input. {pull}41142[41142] +- Bump github.com/elastic/go-sfdc dependency used by x-pack/filebeat/input/salesforce. {pull}41192[41192] *Heartbeat* diff --git a/NOTICE.txt b/NOTICE.txt index d6b9ec916077..1e0806b737aa 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -14959,11 +14959,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-seccomp-bpf@ -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-sfdc -Version: v0.0.0-20240621062639-bcc8456508ff +Version: v0.0.0-20241010131323-8e176480d727 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sfdc@v0.0.0-20240621062639-bcc8456508ff/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sfdc@v0.0.0-20241010131323-8e176480d727/LICENSE.txt: MIT License diff --git a/go.mod b/go.mod index b7d4b5ecb745..7a5392381a6b 100644 --- a/go.mod +++ b/go.mod @@ -192,7 +192,7 @@ require ( github.com/elastic/elastic-agent-libs v0.12.1 github.com/elastic/elastic-agent-system-metrics v0.11.1 github.com/elastic/go-elasticsearch/v8 v8.14.0 - github.com/elastic/go-sfdc v0.0.0-20240621062639-bcc8456508ff + github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 github.com/elastic/mito v1.15.0 github.com/elastic/tk-btf v0.1.0 github.com/elastic/toutoumomoma v0.0.0-20240626215117-76e39db18dfb diff --git a/go.sum b/go.sum index 8e41f529a64c..495e0074d195 100644 --- a/go.sum +++ b/go.sum @@ -365,8 +365,8 @@ github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 h1:q8n4QjcLa4q39Q3 github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595/go.mod h1:s09U1b4P1ZxnKx2OsqY7KlHdCesqZWIhyq0Gs/QC/Us= 
github.com/elastic/go-seccomp-bpf v1.4.0 h1:6y3lYrEHrLH9QzUgOiK8WDqmPaMnnB785WxibCNIOH4= github.com/elastic/go-seccomp-bpf v1.4.0/go.mod h1:wIMxjTbKpWGQk4CV9WltlG6haB4brjSH/dvAohBPM1I= -github.com/elastic/go-sfdc v0.0.0-20240621062639-bcc8456508ff h1:VjmGr45YsntTPgT1bcrzP9gRGHXlBu/XwR0uGSjGAfs= -github.com/elastic/go-sfdc v0.0.0-20240621062639-bcc8456508ff/go.mod h1:sw1pzz4pIqzDQxFWt3dFoG2uIUFAfThxlMfWpjH590E= +github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 h1:yuiN60oaQUz2PtNpNhDI2H6zrCdfiiptmNdwV5WUaKA= +github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727/go.mod h1:sw1pzz4pIqzDQxFWt3dFoG2uIUFAfThxlMfWpjH590E= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-sysinfo v1.14.2 h1:DeIy+pVfdRsd08Nx2Xjh+dUS+jrEEI7LGc29U/BKVWo= From 89ed20d5ea412ae913fcff6730d3d1304410a990 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Oct 2024 15:32:32 +0200 Subject: [PATCH 25/90] build(deps): bump the gcp-sdks group across 1 directory with 10 updates (#41117) * build(deps): bump the gcp-sdks group across 1 directory with 10 updates Bumps the gcp-sdks group with 3 updates in the / directory: [cloud.google.com/go/bigquery](https://github.com/googleapis/google-cloud-go), [cloud.google.com/go/compute/metadata](https://github.com/googleapis/google-cloud-go) and [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go). 
Updates `cloud.google.com/go/bigquery` from 1.62.0 to 1.63.1 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.62.0...bigquery/v1.63.1) Updates `cloud.google.com/go/monitoring` from 1.20.4 to 1.21.0 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/documentai/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/video/v1.20.4...video/v1.21.0) Updates `cloud.google.com/go/pubsub` from 1.41.0 to 1.42.0 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.41.0...pubsub/v1.42.0) Updates `cloud.google.com/go/compute` from 1.27.4 to 1.28.0 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/documentai/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/compute/v1.27.4...pubsub/v1.28.0) Updates `cloud.google.com/go/redis` from 1.16.4 to 1.17.0 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/documentai/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/redis/v1.16.4...kms/v1.17.0) Updates `cloud.google.com/go/auth` from 0.8.0 to 0.9.3 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/v0.8.0...auth/v0.9.3) Updates `cloud.google.com/go/compute/metadata` from 0.5.0 to 0.5.2 - [Release 
notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/v0.5.0...auth/v0.5.2) Updates `cloud.google.com/go/iam` from 1.1.12 to 1.2.1 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.12...iam/v1.2.1) Updates `cloud.google.com/go/longrunning` from 0.5.11 to 0.6.1 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.11...auth/v0.6.1) Updates `cloud.google.com/go/storage` from 1.43.0 to 1.44.0 - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.43.0...spanner/v1.44.0) --- updated-dependencies: - dependency-name: cloud.google.com/go/bigquery dependency-type: direct:production update-type: version-update:semver-minor dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/monitoring dependency-type: direct:production update-type: version-update:semver-minor dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/pubsub dependency-type: direct:production update-type: version-update:semver-minor dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/compute dependency-type: direct:production update-type: version-update:semver-minor dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/redis dependency-type: direct:production update-type: version-update:semver-minor dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/auth 
dependency-type: indirect update-type: version-update:semver-minor dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/compute/metadata dependency-type: indirect update-type: version-update:semver-patch dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/iam dependency-type: indirect update-type: version-update:semver-minor dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/longrunning dependency-type: indirect update-type: version-update:semver-minor dependency-group: gcp-sdks - dependency-name: cloud.google.com/go/storage dependency-type: direct:production update-type: version-update:semver-minor dependency-group: gcp-sdks ... Signed-off-by: dependabot[bot] * Update NOTICE.txt --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- NOTICE.txt | 5257 +++++++++++++++++++++++++++++++++++++++++----------- go.mod | 55 +- go.sum | 122 +- 3 files changed, 4240 insertions(+), 1194 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 1e0806b737aa..2ea1ac2107c0 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -11,11 +11,11 @@ Third party libraries used by the Elastic Beats project: -------------------------------------------------------------------------------- Dependency : cloud.google.com/go -Version: v0.115.0 +Version: v0.115.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.115.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.115.1/LICENSE: Apache License @@ -223,11 +223,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.115.0/LICEN -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/bigquery -Version: v1.62.0 +Version: v1.63.1 Licence type (autodetected): Apache-2.0 
-------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.62.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.63.1/LICENSE: Apache License @@ -435,11 +435,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.62 -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/compute -Version: v1.27.4 +Version: v1.28.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.27.4/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.28.0/LICENSE: Apache License @@ -647,11 +647,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.27. -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/monitoring -Version: v1.20.4 +Version: v1.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1.20.4/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1.21.0/LICENSE: Apache License @@ -859,11 +859,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1. 
-------------------------------------------------------------------------------- Dependency : cloud.google.com/go/pubsub -Version: v1.41.0 +Version: v1.42.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.41.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.42.0/LICENSE: Apache License @@ -1071,11 +1071,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.41.0 -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/redis -Version: v1.16.4 +Version: v1.17.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/redis@v1.16.4/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/redis@v1.17.0/LICENSE: Apache License @@ -1283,11 +1283,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/redis@v1.16.4/ -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/storage -Version: v1.43.0 +Version: v1.44.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/storage@v1.43.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/storage@v1.44.0/LICENSE: Apache License @@ -25428,11 +25428,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : golang.org/x/oauth2 -Version: v0.22.0 +Version: v0.23.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.22.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.23.0/LICENSE: Copyright 2009 The Go Authors. @@ -25650,11 +25650,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : google.golang.org/api -Version: v0.191.0 +Version: v0.197.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/api@v0.191.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/api@v0.197.0/LICENSE: Copyright (c) 2011 Google Inc. All rights reserved. @@ -25687,11 +25687,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : google.golang.org/genproto/googleapis/api -Version: v0.0.0-20240725223205-93522f1f2a9f +Version: v0.0.0-20240903143218-8af14fe29dc1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/api@v0.0.0-20240725223205-93522f1f2a9f/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/api@v0.0.0-20240903143218-8af14fe29dc1/LICENSE: Apache License @@ -25899,11 +25899,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googlea -------------------------------------------------------------------------------- Dependency : google.golang.org/grpc -Version: v1.66.0 +Version: v1.66.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.66.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.66.2/LICENSE: Apache License @@ -27996,12 +27996,12 @@ THE SOFTWARE. 
-------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/auth -Version: v0.8.0 +Dependency : cel.dev/expr +Version: v0.16.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth@v0.8.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cel.dev/expr@v0.16.1/LICENSE: Apache License @@ -28208,12 +28208,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth@v0.8.0/LI -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/auth/oauth2adapt -Version: v0.2.4 +Dependency : cloud.google.com/go/auth +Version: v0.9.3 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth/oauth2adapt@v0.2.4/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth@v0.9.3/LICENSE: Apache License @@ -28420,12 +28420,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth/oauth2ada -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/compute/metadata -Version: v0.5.0 +Dependency : cloud.google.com/go/auth/oauth2adapt +Version: v0.2.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metadata@v0.5.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth/oauth2adapt@v0.2.4/LICENSE: Apache License @@ -28632,12 +28632,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metada -------------------------------------------------------------------------------- -Dependency : 
cloud.google.com/go/datacatalog -Version: v1.20.5 +Dependency : cloud.google.com/go/compute/metadata +Version: v0.5.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1.20.5/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metadata@v0.5.2/LICENSE: Apache License @@ -28844,12 +28844,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1 -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/iam -Version: v1.1.12 +Dependency : cloud.google.com/go/datacatalog +Version: v1.22.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.1.12/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1.22.1/LICENSE: Apache License @@ -29056,12 +29056,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.1.12/LI -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/kms -Version: v1.18.4 +Dependency : cloud.google.com/go/iam +Version: v1.2.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/kms@v1.18.4/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.2.1/LICENSE: Apache License @@ -29268,12 +29268,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/kms@v1.18.4/LI -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/longrunning -Version: v0.5.11 +Dependency : cloud.google.com/go/kms +Version: v1.19.0 
Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/longrunning@v0.5.11/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/kms@v1.19.0/LICENSE: Apache License @@ -29480,12 +29480,13 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/longrunning@v0 -------------------------------------------------------------------------------- -Dependency : code.cloudfoundry.org/go-diodes -Version: v0.0.0-20190809170250-f77fb823c7ee +Dependency : cloud.google.com/go/logging +Version: v1.11.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0.0.0-20190809170250-f77fb823c7ee/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/logging@v1.11.0/LICENSE: + Apache License Version 2.0, January 2004 @@ -29689,13 +29690,14 @@ Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0 See the License for the specific language governing permissions and limitations under the License. 
+ -------------------------------------------------------------------------------- -Dependency : code.cloudfoundry.org/gofileutils -Version: v0.0.0-20170111115228-4d0c80011a0f +Dependency : cloud.google.com/go/longrunning +Version: v0.6.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/gofileutils@v0.0.0-20170111115228-4d0c80011a0f/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/longrunning@v0.6.1/LICENSE: Apache License @@ -29902,47 +29904,681 @@ Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/gofileutils@ -------------------------------------------------------------------------------- -Dependency : code.cloudfoundry.org/rfc5424 -Version: v0.0.0-20180905210152-236a6d29298a -Licence type (autodetected): BSD-2-Clause +Dependency : cloud.google.com/go/trace +Version: v1.11.0 +Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/rfc5424@v0.0.0-20180905210152-236a6d29298a/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/trace@v1.11.0/LICENSE: -BSD 2-Clause License -Copyright (c) 2016, Ross Kinder -All rights reserved. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + 1. Definitions. 
-* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- -Dependency : github.com/AdaLogics/go-fuzz-headers -Version: v0.0.0-20230811130428-ced1acdcaa24 +Dependency : code.cloudfoundry.org/go-diodes +Version: v0.0.0-20190809170250-f77fb823c7ee Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!ada!logics/go-fuzz-headers@v0.0.0-20230811130428-ced1acdcaa24/LICENSE: +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0.0.0-20190809170250-f77fb823c7ee/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +Dependency : code.cloudfoundry.org/gofileutils +Version: v0.0.0-20170111115228-4d0c80011a0f +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/gofileutils@v0.0.0-20170111115228-4d0c80011a0f/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : code.cloudfoundry.org/rfc5424 +Version: v0.0.0-20180905210152-236a6d29298a +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/rfc5424@v0.0.0-20180905210152-236a6d29298a/LICENSE: + +BSD 2-Clause License + +Copyright (c) 2016, Ross Kinder +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/AdaLogics/go-fuzz-headers +Version: v0.0.0-20230811130428-ced1acdcaa24 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!ada!logics/go-fuzz-headers@v0.0.0-20230811130428-ced1acdcaa24/LICENSE: Apache License Version 2.0, January 2004 @@ -32093,6 +32729,854 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure!a!d/microsoft-au SOFTWARE +-------------------------------------------------------------------------------- +Dependency : github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp +Version: v1.24.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/opentelemetry-operations-go/detectors/gcp@v1.24.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric +Version: v0.48.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/opentelemetry-operations-go/exporter/metric@v0.48.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock +Version: v0.48.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/opentelemetry-operations-go/internal/cloudmock@v0.48.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping +Version: v0.48.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/opentelemetry-operations-go/internal/resourcemapping@v0.48.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/JohnCGriffin/overflow Version: v0.0.0-20211019200055-46fa312c352c @@ -34016,95 +35500,684 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ----------------------------------------------------------------------- +---------------------------------------------------------------------- + +cpp/src/arrow/vendored/base64.cpp has the following license + +ZLIB License + +Copyright (C) 2004-2017 René Nyffenegger + +This source code is provided 'as-is', without any express or implied +warranty. 
In no event will the author be held liable for any damages arising +from the use of this software. + +Permission is granted to anyone to use this software for any purpose, including +commercial applications, and to alter it and redistribute it freely, subject to +the following restrictions: + +1. The origin of this source code must not be misrepresented; you must not + claim that you wrote the original source code. If you use this source code + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original source code. + +3. This notice may not be removed or altered from any source distribution. + +René Nyffenegger rene.nyffenegger@adp-gmbh.ch + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/optional.hpp has the following license + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/musl/strptime.c has the following license + +Copyright © 2005-2020 Rich Felker, et al. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/apache/thrift +Version: v0.19.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.19.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. + +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) + +-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp + +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. 
It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ + + +-------------------------------------------------- +For lib/py/compat/win32/stdint.h + +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + + +-------------------------------------------------- +Codegen template in t_html_generator.h + +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. + +--------------------------------------------------- +For t_cl_generator.cc + + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook + +--------------------------------------------------- + + +-------------------------------------------------------------------------------- +Dependency : github.com/poy/eachers +Version: v0.0.0-20181020210610-23942921fe77 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/poy/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md: + +The MIT License (MIT) + +Copyright (c) 2016 Andrew Poydence + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do 
so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/armon/go-radix +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/armon/go-radix@v1.0.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream +Version: v1.6.4 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream@v1.6.4/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
-cpp/src/arrow/vendored/base64.cpp has the following license + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -ZLIB License + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. -Copyright (C) 2004-2017 René Nyffenegger + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -This source code is provided 'as-is', without any express or implied -warranty. In no event will the author be held liable for any damages arising -from the use of this software. + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -Permission is granted to anyone to use this software for any purpose, including -commercial applications, and to alter it and redistribute it freely, subject to -the following restrictions: + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -1. The origin of this source code must not be misrepresented; you must not - claim that you wrote the original source code. If you use this source code - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and -2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original source code. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -3. This notice may not be removed or altered from any source distribution. + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. -René Nyffenegger rene.nyffenegger@adp-gmbh.ch + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. --------------------------------------------------------------------------------- + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -The file cpp/src/arrow/vendored/optional.hpp has the following license + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -Boost Software License - Version 1.0 - August 17th, 2003 + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. 
-The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. + END OF TERMS AND CONDITIONS --------------------------------------------------------------------------------- + APPENDIX: How to apply the Apache License to your work. -The file cpp/src/arrow/vendored/musl/strptime.c has the following license + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. 
(Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -Copyright © 2005-2020 Rich Felker, et al. + Copyright [yyyy] [name of copyright owner] -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. + http://www.apache.org/licenses/LICENSE-2.0 -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
-------------------------------------------------------------------------------- -Dependency : github.com/apache/thrift -Version: v0.19.0 +Dependency : github.com/aws/aws-sdk-go-v2/internal/configsources +Version: v1.3.16 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.16/LICENSE.txt: Apache License @@ -34309,179 +36382,226 @@ Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.19.0/L See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 +Version: v2.6.16 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- --------------------------------------------------- -Portions of the following files are licensed under the MIT License: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.16/LICENSE.txt: - lib/erl/src/Makefile.am -Please see doc/otp-base-license.txt for the full terms of this license. 
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. + 1. Definitions. --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -*/ -(By Douglas Crockford ) + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. --------------------------------------------------- -For lib/cpp/src/thrift/windows/SocketPair.cpp + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -/* socketpair.c - * Copyright 2007 by Nathan C. Myers ; some rights reserved. - * This code is Free Software. 
It may be copied freely, in original or - * modified form, subject only to the restrictions that (1) the author is - * relieved from all responsibilities for any use for any purpose, and (2) - * this copyright notice must be retained, unchanged, in its entirety. If - * for any reason the author might be held responsible for any consequences - * of copying or use, license is withheld. - */ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. --------------------------------------------------- -For lib/py/compat/win32/stdint.h + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." --------------------------------------------------- -Codegen template in t_html_generator.h + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -* Bootstrap v2.0.3 -* -* Copyright 2012 Twitter, Inc -* Licensed under the Apache License v2.0 -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Designed and built with all the love in the world @twitter by @mdo and @fat. + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. ---------------------------------------------------- -For t_cl_generator.cc + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. - * Copyright (c) 2008- Patrick Collison - * Copyright (c) 2006- Facebook + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: ---------------------------------------------------- + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and --------------------------------------------------------------------------------- -Dependency : github.com/poy/eachers -Version: v0.0.0-20181020210610-23942921fe77 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -Contents of probable licence file $GOMODCACHE/github.com/poy/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md: + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. -The MIT License (MIT) + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -Copyright (c) 2016 Andrew Poydence + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 7. 
Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + 9. Accepting Warranty or Additional Liability. 
While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. --------------------------------------------------------------------------------- -Dependency : github.com/armon/go-radix -Version: v1.0.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + END OF TERMS AND CONDITIONS -Contents of probable licence file $GOMODCACHE/github.com/armon/go-radix@v1.0.0/LICENSE: + APPENDIX: How to apply the Apache License to your work. -The MIT License (MIT) + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
-Copyright (c) 2014 Armon Dadgar + Copyright [yyyy] [name of copyright owner] -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + http://www.apache.org/licenses/LICENSE-2.0 -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
-------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream -Version: v1.6.4 +Dependency : github.com/aws/aws-sdk-go-v2/internal/ini +Version: v1.8.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream@v1.6.4/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/ini@v1.8.1/LICENSE.txt: Apache License @@ -34688,436 +36808,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/aws/p -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/internal/configsources +Dependency : github.com/aws/aws-sdk-go-v2/internal/v4a Version: v1.3.16 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.16/LICENSE.txt: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -Version: v2.6.16 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.16/LICENSE.txt: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/internal/ini -Version: v1.8.1 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/ini@v1.8.1/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/v4a@v1.3.16/LICENSE.txt: Apache License @@ -35324,12 +37020,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/inter -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/internal/v4a -Version: v1.3.16 +Dependency : github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding +Version: v1.11.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/v4a@v1.3.16/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding@v1.11.4/LICENSE.txt: Apache License @@ -35536,12 +37232,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/inter 
-------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -Version: v1.11.4 +Dependency : github.com/aws/aws-sdk-go-v2/service/internal/checksum +Version: v1.3.18 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding@v1.11.4/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/checksum@v1.3.18/LICENSE.txt: Apache License @@ -35748,12 +37444,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/internal/checksum -Version: v1.3.18 +Dependency : github.com/aws/aws-sdk-go-v2/service/internal/presigned-url +Version: v1.11.18 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/checksum@v1.3.18/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url@v1.11.18/LICENSE.txt: Apache License @@ -35960,12 +37656,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -Version: v1.11.18 +Dependency : github.com/aws/aws-sdk-go-v2/service/internal/s3shared +Version: v1.17.16 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file 
$GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url@v1.11.18/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/s3shared@v1.17.16/LICENSE.txt: Apache License @@ -36172,12 +37868,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/internal/s3shared -Version: v1.17.16 +Dependency : github.com/aws/aws-sdk-go-v2/service/sso +Version: v1.22.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/s3shared@v1.17.16/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/sso@v1.22.5/LICENSE.txt: Apache License @@ -36384,12 +38080,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/sso -Version: v1.22.5 +Dependency : github.com/aws/aws-sdk-go-v2/service/ssooidc +Version: v1.26.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/sso@v1.22.5/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/ssooidc@v1.26.5/LICENSE.txt: Apache License @@ -36596,12 +38292,134 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/ssooidc -Version: v1.26.5 +Dependency : github.com/benbjohnson/clock +Version: v1.3.0 +Licence type 
(autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/benbjohnson/clock@v1.3.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/beorn7/perks +Version: v1.0.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/beorn7/perks@v1.0.1/LICENSE: + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/bluekeyes/go-gitdiff +Version: v0.7.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/bluekeyes/go-gitdiff@v0.7.1/LICENSE: + +MIT License + +Copyright (c) 2019 Billy Keyes + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/cenkalti/backoff/v4 +Version: v4.3.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cenkalti/backoff/v4@v4.3.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/census-instrumentation/opencensus-proto +Version: v0.4.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/ssooidc@v1.26.5/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/census-instrumentation/opencensus-proto@v0.4.1/LICENSE: Apache License @@ -36808,16 +38626,18 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/benbjohnson/clock -Version: v1.3.0 +Dependency : github.com/cilium/ebpf +Version: v0.13.2 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/benbjohnson/clock@v1.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/cilium/ebpf@v0.13.2/LICENSE: -The MIT License (MIT) +MIT License -Copyright (c) 2014 Ben Johnson +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -36839,127 +38659,214 @@ SOFTWARE. 
-------------------------------------------------------------------------------- -Dependency : github.com/beorn7/perks -Version: v1.0.1 -Licence type (autodetected): MIT +Dependency : github.com/cncf/xds/go +Version: v0.0.0-20240905190251-b4127c9b8d78 +Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/beorn7/perks@v1.0.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/cncf/xds/go@v0.0.0-20240905190251-b4127c9b8d78/LICENSE: -Copyright (C) 2013 Blake Mizerany + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. + 1. Definitions. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. --------------------------------------------------------------------------------- -Dependency : github.com/bluekeyes/go-gitdiff -Version: v0.7.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -Contents of probable licence file $GOMODCACHE/github.com/bluekeyes/go-gitdiff@v0.7.1/LICENSE: + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -MIT License + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -Copyright (c) 2019 Billy Keyes + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+ "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. --------------------------------------------------------------------------------- -Dependency : github.com/cenkalti/backoff/v4 -Version: v4.3.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. -Contents of probable licence file $GOMODCACHE/github.com/cenkalti/backoff/v4@v4.3.0/LICENSE: + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -The MIT License (MIT) + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -Copyright (c) 2014 Cenk Altı + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, 
trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. --------------------------------------------------------------------------------- -Dependency : github.com/cilium/ebpf -Version: v0.13.2 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + END OF TERMS AND CONDITIONS -Contents of probable licence file $GOMODCACHE/github.com/cilium/ebpf@v0.13.2/LICENSE: + APPENDIX: How to apply the Apache License to your work. -MIT License + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. 
(Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -Copyright (c) 2017 Nathan Sweet -Copyright (c) 2018, 2019 Cloudflare -Copyright (c) 2019 Authors of Cilium + Copyright [yyyy] [name of copyright owner] -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + http://www.apache.org/licenses/LICENSE-2.0 -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- @@ -37677,15 +39584,470 @@ Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0. -------------------------------------------------------------------------------- -Dependency : github.com/dgryski/go-farm -Version: v0.0.0-20200201041132-a6ae2369ad13 +Dependency : github.com/dgryski/go-farm +Version: v0.0.0-20200201041132-a6ae2369ad13 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dgryski/go-farm@v0.0.0-20200201041132-a6ae2369ad13/LICENSE: + +Copyright (c) 2014-2017 Damian Gryski +Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + + +-------------------------------------------------------------------------------- +Dependency : github.com/dimchansky/utfbom +Version: v1.1.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dimchansky/utfbom@v1.1.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/distribution/reference +Version: v0.6.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/distribution/reference@v0.6.0/LICENSE: + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/dlclark/regexp2 +Version: v1.4.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dgryski/go-farm@v0.0.0-20200201041132-a6ae2369ad13/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/dlclark/regexp2@v1.4.0/LICENSE: -Copyright (c) 2014-2017 Damian Gryski -Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com +The MIT License (MIT) + +Copyright (c) Doug Clark Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -37694,30 +40056,68 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/dnephin/pflag +Version: v1.0.7 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dnephin/pflag@v1.0.7/LICENSE: +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : github.com/dimchansky/utfbom -Version: v1.1.0 +Dependency : github.com/docker/go-metrics +Version: v0.0.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dimchansky/utfbom@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0.1/LICENSE: + Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -37892,24 +40292,13 @@ Contents of probable licence file $GOMODCACHE/github.com/dimchansky/utfbom@v1.1. END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} + Copyright 2013-2016 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -37919,14 +40308,75 @@ Contents of probable licence file $GOMODCACHE/github.com/dimchansky/utfbom@v1.1. -------------------------------------------------------------------------------- -Dependency : github.com/distribution/reference -Version: v0.6.0 +Dependency : github.com/eapache/go-xerial-snappy +Version: v0.0.0-20180814174437-776d5712da21 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/eapache/go-xerial-snappy@v0.0.0-20180814174437-776d5712da21/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/eapache/queue +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/eapache/queue@v1.1.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/elastic-transport-go/v8 +Version: v8.6.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/distribution/reference@v0.6.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transport-go/v8@v8.6.0/LICENSE: -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -38106,7 +40556,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -38114,7 +40564,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -38129,88 +40579,18 @@ Apache License limitations under the License. 
- --------------------------------------------------------------------------------- -Dependency : github.com/dlclark/regexp2 -Version: v1.4.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/dlclark/regexp2@v1.4.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) Doug Clark - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/dnephin/pflag -Version: v1.0.7 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/dnephin/pflag@v1.0.7/LICENSE: - -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -------------------------------------------------------------------------------- -Dependency : github.com/docker/go-metrics -Version: v0.0.1 +Dependency : github.com/elastic/go-windows +Version: v1.0.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0.2/LICENSE.txt: Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -38385,13 +40765,24 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0. END OF TERMS AND CONDITIONS - Copyright 2013-2016 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -38401,16 +40792,16 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0. 
-------------------------------------------------------------------------------- -Dependency : github.com/eapache/go-xerial-snappy -Version: v0.0.0-20180814174437-776d5712da21 +Dependency : github.com/elastic/pkcs8 +Version: v1.0.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/eapache/go-xerial-snappy@v0.0.0-20180814174437-776d5712da21/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/pkcs8@v1.0.0/LICENSE: The MIT License (MIT) -Copyright (c) 2016 Evan Huus +Copyright (c) 2014 youmark Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -38430,44 +40821,118 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/elazarl/goproxy +Version: v0.0.0-20240909085733-6741dbfc16a1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elazarl/goproxy@v0.0.0-20240909085733-6741dbfc16a1/LICENSE: + +Copyright (c) 2012 Elazar Leibovich. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Elazar Leibovich. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------------------- -Dependency : github.com/eapache/queue -Version: v1.1.0 +Dependency : github.com/elazarl/goproxy/ext +Version: v0.0.0-20240909085733-6741dbfc16a1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elazarl/goproxy/ext@v0.0.0-20240909085733-6741dbfc16a1/LICENSE: + +Copyright (c) 2012 Elazar Leibovich. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Elazar Leibovich. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/emicklei/go-restful/v3 +Version: v3.11.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/eapache/queue@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/emicklei/go-restful/v3@v3.11.0/LICENSE: -The MIT License (MIT) +Copyright (c) 2012,2013 Ernest Micklei -Copyright (c) 2014 Evan Huus +MIT License -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining +a copy of 
this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- -Dependency : github.com/elastic/elastic-transport-go/v8 -Version: v8.6.0 +Dependency : github.com/envoyproxy/go-control-plane +Version: v0.13.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transport-go/v8@v8.6.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/go-control-plane@v0.13.0/LICENSE: Apache License Version 2.0, January 2004 @@ -38649,7 +41114,7 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transpo APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -38657,7 +41122,7 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transpo same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -38673,12 +41138,12 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transpo -------------------------------------------------------------------------------- -Dependency : github.com/elastic/go-windows -Version: v1.0.2 +Dependency : github.com/envoyproxy/protoc-gen-validate +Version: v1.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/protoc-gen-validate@v1.1.0/LICENSE: Apache License @@ -38884,141 +41349,6 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0 limitations under the License. --------------------------------------------------------------------------------- -Dependency : github.com/elastic/pkcs8 -Version: v1.0.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elastic/pkcs8@v1.0.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 youmark - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- -Dependency : github.com/elazarl/goproxy -Version: v0.0.0-20240909085733-6741dbfc16a1 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elazarl/goproxy@v0.0.0-20240909085733-6741dbfc16a1/LICENSE: - -Copyright (c) 2012 Elazar Leibovich. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Elazar Leibovich. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/elazarl/goproxy/ext -Version: v0.0.0-20240909085733-6741dbfc16a1 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elazarl/goproxy/ext@v0.0.0-20240909085733-6741dbfc16a1/LICENSE: - -Copyright (c) 2012 Elazar Leibovich. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Elazar Leibovich. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/emicklei/go-restful/v3 -Version: v3.11.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/emicklei/go-restful/v3@v3.11.0/LICENSE: - -Copyright (c) 2012,2013 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -------------------------------------------------------------------------------- Dependency : github.com/evanphx/json-patch Version: v5.6.0+incompatible @@ -43073,11 +45403,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/googleapis/enterprise-certificate-proxy -Version: v0.3.2 +Version: v0.3.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/enterprise-certificate-proxy@v0.3.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/googleapis/enterprise-certificate-proxy@v0.3.4/LICENSE: Apache License @@ -51559,6 +53889,45 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/planetscale/vtprotobuf +Version: v0.6.1-0.20240319094008-0393e58bdf10 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/planetscale/vtprotobuf@v0.6.1-0.20240319094008-0393e58bdf10/LICENSE: + +Copyright (c) 2021, PlanetScale Inc. All rights reserved. +Copyright (c) 2013, The GoGo Authors. All rights reserved. +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + -------------------------------------------------------------------------------- Dependency : github.com/pmezard/go-difflib Version: v1.0.1-0.20181226105442-5d4384ee4fb2 @@ -54232,12 +56601,436 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector@v0.1 -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/collector/config/configtelemetry +Dependency : go.opentelemetry.io/collector/config/configtelemetry +Version: v0.109.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/config/configtelemetry@v0.109.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/collector/consumer/consumerprofiles +Version: v0.109.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/consumer/consumerprofiles@v0.109.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/collector/consumer/consumertest Version: v0.109.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/config/configtelemetry@v0.109.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/consumer/consumertest@v0.109.0/LICENSE: Apache License @@ -54444,12 +57237,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/conf -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/collector/consumer/consumerprofiles +Dependency : go.opentelemetry.io/collector/pdata/pprofile Version: v0.109.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/consumer/consumerprofiles@v0.109.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdata/pprofile@v0.109.0/LICENSE: Apache License @@ -54656,13 +57449,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/cons 
-------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/collector/consumer/consumertest -Version: v0.109.0 +Dependency : go.opentelemetry.io/contrib/detectors/gcp +Version: v1.29.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/consumer/consumertest@v0.109.0/LICENSE: - +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/detectors/gcp@v1.29.0/LICENSE: Apache License Version 2.0, January 2004 @@ -54868,13 +57660,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/cons -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/collector/pdata/pprofile -Version: v0.109.0 +Dependency : go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +Version: v0.54.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdata/pprofile@v0.109.0/LICENSE: - +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.54.0/LICENSE: Apache License Version 2.0, January 2004 @@ -55080,12 +57871,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdat -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc -Version: v0.49.0 +Dependency : go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +Version: v0.54.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file 
$GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.49.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp@v0.54.0/LICENSE: Apache License Version 2.0, January 2004 @@ -55291,12 +58082,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instru -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp -Version: v0.53.0 +Dependency : go.opentelemetry.io/otel +Version: v1.29.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp@v0.53.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.29.0/LICENSE: Apache License Version 2.0, January 2004 @@ -55502,12 +58293,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instru -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel -Version: v1.29.0 +Dependency : go.opentelemetry.io/otel/exporters/otlp/otlptrace +Version: v1.28.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.29.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.28.0/LICENSE: Apache License Version 2.0, January 2004 @@ -55713,12 +58504,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.29.0/L -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/exporters/otlp/otlptrace +Dependency : 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp Version: v1.28.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.28.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp@v1.28.0/LICENSE: Apache License Version 2.0, January 2004 @@ -55924,12 +58715,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp -Version: v1.28.0 +Dependency : go.opentelemetry.io/otel/metric +Version: v1.29.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp@v1.28.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1.29.0/LICENSE: Apache License Version 2.0, January 2004 @@ -56135,12 +58926,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/metric +Dependency : go.opentelemetry.io/otel/sdk Version: v1.29.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1.29.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk@v1.29.0/LICENSE: Apache License Version 2.0, January 2004 @@ -56346,12 +59137,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1 
-------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/sdk +Dependency : go.opentelemetry.io/otel/sdk/metric Version: v1.29.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk@v1.29.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk/metric@v1.29.0/LICENSE: Apache License Version 2.0, January 2004 @@ -57144,13 +59935,13 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/xerrors -Version: v0.0.0-20231012003039-104605ab7028 +Version: v0.0.0-20240903120638-7835f813f4da Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/xerrors@v0.0.0-20231012003039-104605ab7028/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/xerrors@v0.0.0-20240903120638-7835f813f4da/LICENSE: -Copyright (c) 2019 The Go Authors. All rights reserved. +Copyright 2019 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -57162,7 +59953,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -57213,11 +60004,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : google.golang.org/genproto -Version: v0.0.0-20240730163845-b1a4ccb954bf +Version: v0.0.0-20240903143218-8af14fe29dc1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20240730163845-b1a4ccb954bf/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20240903143218-8af14fe29dc1/LICENSE: Apache License @@ -57425,11 +60216,223 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0- -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto/googleapis/rpc -Version: v0.0.0-20240822170219-fc7c04adadcd +Version: v0.0.0-20240903143218-8af14fe29dc1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/rpc@v0.0.0-20240903143218-8af14fe29dc1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : google.golang.org/grpc/stats/opentelemetry +Version: v0.0.0-20240907200651-3ffb98b2c93a Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/rpc@v0.0.0-20240822170219-fc7c04adadcd/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/grpc/stats/opentelemetry@v0.0.0-20240907200651-3ffb98b2c93a/LICENSE: Apache License diff --git a/go.mod b/go.mod index 7a5392381a6b..03ea83236240 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/elastic/beats/v7 go 1.22.0 require ( - cloud.google.com/go/bigquery v1.62.0 - cloud.google.com/go/monitoring v1.20.4 - cloud.google.com/go/pubsub v1.41.0 + cloud.google.com/go/bigquery v1.63.1 + cloud.google.com/go/monitoring v1.21.0 + cloud.google.com/go/pubsub v1.42.0 code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee // indirect code.cloudfoundry.org/go-loggregator v7.4.0+incompatible code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a // indirect @@ -137,15 +137,15 @@ require ( golang.org/x/crypto v0.27.0 golang.org/x/mod v0.21.0 golang.org/x/net v0.29.0 - golang.org/x/oauth2 v0.22.0 + golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 
golang.org/x/sys v0.25.0 golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.25.0 - google.golang.org/api v0.191.0 - google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.66.0 + google.golang.org/api v0.197.0 + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.2 google.golang.org/protobuf v1.34.2 gopkg.in/inf.v0 v0.9.1 gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect @@ -162,9 +162,9 @@ require ( ) require ( - cloud.google.com/go v0.115.0 - cloud.google.com/go/compute v1.27.4 - cloud.google.com/go/redis v1.16.4 + cloud.google.com/go v0.115.1 + cloud.google.com/go/compute v1.28.0 + cloud.google.com/go/redis v1.17.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 @@ -222,17 +222,18 @@ require ( go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/pdata v1.15.0 go.opentelemetry.io/collector/receiver v0.109.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) require ( aqwari.net/xml v0.0.0-20210331023308-d9421b293817 // indirect - cloud.google.com/go/auth v0.8.0 // indirect + cel.dev/expr v0.16.1 // indirect + cloud.google.com/go/auth v0.9.3 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // indirect - cloud.google.com/go/longrunning v0.5.11 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.2.1 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect github.com/AdaLogics/go-fuzz-headers 
v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/azure-amqp-common-go/v4 v4.2.0 // indirect @@ -246,6 +247,9 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/andybalholm/brotli v1.0.5 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect @@ -266,7 +270,9 @@ require ( github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bluekeyes/go-gitdiff v0.7.1 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cilium/ebpf v0.13.2 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 // indirect github.com/cyphar/filepath-securejoin v0.2.5 // indirect @@ -283,6 +289,8 @@ require ( github.com/elazarl/goproxy v0.0.0-20240909085733-6741dbfc16a1 // indirect github.com/elazarl/goproxy/ext v0.0.0-20240909085733-6741dbfc16a1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/fearful-symmetry/gomsr v0.0.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -308,7 +316,7 @@ require ( github.com/google/licenseclassifier v0.0.0-20221004142553-c1ed8fcf4bab // indirect 
github.com/google/s2a-go v0.1.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -357,6 +365,7 @@ require ( github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pierrec/lz4 v2.6.0+incompatible // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.20.2 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect @@ -376,16 +385,20 @@ require ( go.opentelemetry.io/collector/config/configtelemetry v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.109.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/ratelimit v0.3.1 // indirect golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 // indirect golang.org/x/term v0.24.0 // indirect - 
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect @@ -398,7 +411,7 @@ require ( ) require ( - cloud.google.com/go/storage v1.43.0 + cloud.google.com/go/storage v1.44.0 github.com/PaloAltoNetworks/pango v0.10.2 github.com/dlclark/regexp2 v1.4.0 // indirect github.com/google/gofuzz v1.2.0 // indirect diff --git a/go.sum b/go.sum index 495e0074d195..63a740c46dd7 100644 --- a/go.sum +++ b/go.sum @@ -1,35 +1,41 @@ aqwari.net/xml v0.0.0-20210331023308-d9421b293817 h1:+3Rh5EaTzNLnzWx3/uy/mAaH/dGI7svJ6e0oOIDcPuE= aqwari.net/xml v0.0.0-20210331023308-d9421b293817/go.mod h1:c7kkWzc7HS/t8Q2DcVY8P2d1dyWNEhEVT5pL0ZHO11c= +cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.8.0 h1:y8jUJLl/Fg+qNBWxP/Hox2ezJvjkrPb952PC1p0G6A4= -cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.3 
h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/bigquery v1.62.0 h1:SYEA2f7fKqbSRRBHb7g0iHTtZvtPSPYdXfmqsjpsBwo= -cloud.google.com/go/bigquery v1.62.0/go.mod h1:5ee+ZkF1x/ntgCsFQJAQTM3QkAZOecfCmvxhkJsWRSA= -cloud.google.com/go/compute v1.27.4 h1:XM8ulx6crjdl09XBfji7viFgZOEQuIxBwKmjRH9Rtmc= -cloud.google.com/go/compute v1.27.4/go.mod h1:7JZS+h21ERAGHOy5qb7+EPyXlQwzshzrx1x6L9JhTqU= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/datacatalog v1.20.5 h1:Cosg/L60myEbpP1HoNv77ykV7zWe7hqSwY4uUDmhx/I= -cloud.google.com/go/datacatalog v1.20.5/go.mod h1:DB0QWF9nelpsbB0eR/tA0xbHZZMvpoFD1XFy3Qv/McI= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= -cloud.google.com/go/kms v1.18.4 h1:dYN3OCsQ6wJLLtOnI8DGUwQ5shMusXsWCCC+s09ATsk= -cloud.google.com/go/kms v1.18.4/go.mod h1:SG1bgQ3UWW6/KdPo9uuJnzELXY5YTTMJtDYvajiQ22g= -cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= -cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= -cloud.google.com/go/monitoring v1.20.4 h1:zwcViK7mT9SV0kzKqLOI3spRadvsmvw/R9z1MHNeC0E= -cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c= -cloud.google.com/go/pubsub v1.41.0 h1:ZPaM/CvTO6T+1tQOs/jJ4OEMpjtel0PTLV7j1JK+ZrI= -cloud.google.com/go/pubsub v1.41.0/go.mod h1:g+YzC6w/3N91tzG66e2BZtp7WrpBBMXVa3Y9zVoOGpk= -cloud.google.com/go/redis v1.16.4 
h1:9CO6EcuM9/CpgtcjG6JZV+GFw3oDrRfwLwmvwo/uM1o= -cloud.google.com/go/redis v1.16.4/go.mod h1:unCVfLP5eFrVhGLDnb7IaSaWxuZ+7cBgwwBwbdG9m9w= -cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= -cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +cloud.google.com/go/bigquery v1.63.1 h1:/6syiWrSpardKNxdvldS5CUTRJX1iIkSPXCjLjiGL+g= +cloud.google.com/go/bigquery v1.63.1/go.mod h1:ufaITfroCk17WTqBhMpi8CRjsfHjMX07pDrQaRKKX2o= +cloud.google.com/go/compute v1.28.0 h1:OPtBxMcheSS+DWfci803qvPly3d4w7Eu5ztKBcFfzwk= +cloud.google.com/go/compute v1.28.0/go.mod h1:DEqZBtYrDnD5PvjsKwb3onnhX+qjdCVM7eshj1XdjV4= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/datacatalog v1.22.1 h1:i0DyKb/o7j+0vgaFtimcRFjYsD6wFw1jpnODYUyiYRs= +cloud.google.com/go/datacatalog v1.22.1/go.mod h1:MscnJl9B2lpYlFoxRjicw19kFTwEke8ReKL5Y/6TWg8= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/kms v1.19.0 h1:x0OVJDl6UH1BSX4THKlMfdcFWoE4ruh90ZHuilZekrU= +cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM= +cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= +cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/monitoring v1.21.0 h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro= +cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4= +cloud.google.com/go/pubsub v1.42.0 
h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= +cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= +cloud.google.com/go/redis v1.17.0 h1:YItghJ0VY98gJperCaTVEe7g+QZWz1nsN5ioJcSxkDY= +cloud.google.com/go/redis v1.17.0/go.mod h1:pzTdaIhriMLiXu8nn2CgiS52SYko0tO1Du4d3MPOG5I= +cloud.google.com/go/storage v1.44.0 h1:abBzXf4UJKMmQ04xxJf9dYM/fNl24KHoTuBjyJDX2AI= +cloud.google.com/go/storage v1.44.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE= +cloud.google.com/go/trace v1.11.0 h1:UHX6cOJm45Zw/KIbqHe4kII8PupLt/V5tscZUkeiJVI= +cloud.google.com/go/trace v1.11.0/go.mod h1:Aiemdi52635dBR7o3zuc9lLjXo3BwGaChEjCa3tJNmM= code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee h1:iAAPf9s7/+BIiGf+RjgcXLm3NoZaLIJsBXJuUa63Lx8= code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= code.cloudfoundry.org/go-loggregator v7.4.0+incompatible h1:KqZYloMQWM5Zg/BQKunOIA4OODh7djZbk48qqbowNFI= @@ -113,6 +119,14 @@ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzS github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -247,6 +261,8 @@ github.com/cavaliergopher/rpm v1.2.0/go.mod h1:R0q3vTqa7RUvPofAZYrnjJ63hh2vngjFf github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.13.2 h1:uhLimLX+jF9BTPPvoCUYh/mBeoONkjgaJ9w9fn0mRj4= @@ -259,6 +275,8 @@ github.com/cloudfoundry/noaa v2.1.0+incompatible/go.mod h1:5LmacnptvxzrTvMfL9+EJ github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4 h1:cWfya7mo/zbnwYVio6eWGsFJHqYw4/k/uhwIJ1eqRPI= 
github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4/go.mod h1:GS0pCHd7onIsewbw8Ue9qa9pZPv2V88cUZDttK6KzgI= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= @@ -398,7 +416,11 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.10.0/go.mod 
h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= @@ -559,8 +581,8 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -793,6 +815,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -954,10 +978,12 @@ go.opentelemetry.io/collector/pdata/pprofile v0.109.0 h1:5lobQKeHk8p4WC7KYbzL6Zq go.opentelemetry.io/collector/pdata/pprofile v0.109.0/go.mod h1:lXIifCdtR5ewO17JAYTUsclMqRp6h6dCowoXHhGyw8Y= go.opentelemetry.io/collector/receiver v0.109.0 h1:DTOM7xaDl7FUGQIjvjmWZn03JUE+aG4mJzWWfb7S8zw= go.opentelemetry.io/collector/receiver v0.109.0/go.mod h1:jeiCHaf3PE6aXoZfHF5Uexg7aztu+Vkn9LVw0YDKm6g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 
h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= @@ -968,6 +994,8 @@ go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2 go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= @@ -1063,8 +1091,8 @@ golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1183,30 +1211,32 @@ golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= -google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk= -google.golang.org/api v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E= +google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= -google.golang.org/genproto 
v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 
h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 764ba96d34d0225e2068c9c38b102cfb771eaf12 Mon Sep 17 00:00:00 2001 From: "Alex K." <8418476+fearful-symmetry@users.noreply.github.com> Date: Fri, 11 Oct 2024 08:25:45 -0700 Subject: [PATCH 26/90] Create nil cgroups reader for processor, improve error handling (#41198) * create nil reader, improve error handling * add test, clean up * linter --- .../add_docker_metadata/add_docker_metadata.go | 11 ++++++----- .../add_process_metadata.go | 4 +++- .../add_process_metadata_test.go | 18 +++++++++++++++++- .../gosigar_cid_provider.go | 5 +---- libbeat/processors/cgroups.go | 11 +++++++++++ 5 files changed, 38 insertions(+), 11 deletions(-) diff --git a/libbeat/processors/add_docker_metadata/add_docker_metadata.go b/libbeat/processors/add_docker_metadata/add_docker_metadata.go index c1b0afeb9fa4..73ee82c41436 100644 --- a/libbeat/processors/add_docker_metadata/add_docker_metadata.go +++ b/libbeat/processors/add_docker_metadata/add_docker_metadata.go @@ -113,7 +113,9 @@ func buildDockerMetadataProcessor(log *logp.Logger, cfg *conf.C, watcherConstruc } reader, err := initCgroupPaths(resolve.NewTestResolver(config.HostFS), false) - if err != nil && !errors.Is(err, cgroup.ErrCgroupsMissing) { + if errors.Is(err, cgroup.ErrCgroupsMissing) { 
+ reader = &processors.NilCGReader{} + } else if err != nil { return nil, fmt.Errorf("error creating cgroup reader: %w", err) } @@ -284,14 +286,13 @@ func (d *addDockerMetadata) getProcessCgroups(pid int) (cgroup.PathList, error) return cgroups, nil } - if d.cgreader == nil { - return cgroups, fs.ErrNotExist - } cgroups, err := d.cgreader.ProcessCgroupPaths(pid) if err != nil { return cgroups, fmt.Errorf("failed to read cgroups for pid=%v: %w", pid, err) } - + if len(cgroups.Flatten()) == 0 { + return cgroup.PathList{}, fs.ErrNotExist + } d.cgroups.Put(pid, cgroups) return cgroups, nil } diff --git a/libbeat/processors/add_process_metadata/add_process_metadata.go b/libbeat/processors/add_process_metadata/add_process_metadata.go index 6e88341993f9..2385e5f99def 100644 --- a/libbeat/processors/add_process_metadata/add_process_metadata.go +++ b/libbeat/processors/add_process_metadata/add_process_metadata.go @@ -164,7 +164,9 @@ func newProcessMetadataProcessorWithProvider(config config, provider processMeta } reader, err := initCgroupPaths(resolve.NewTestResolver(config.HostPath), false) - if err != nil && !errors.Is(err, cgroup.ErrCgroupsMissing) { + if errors.Is(err, cgroup.ErrCgroupsMissing) { + reader = &processors.NilCGReader{} + } else if err != nil { return nil, fmt.Errorf("error creating cgroup reader: %w", err) } diff --git a/libbeat/processors/add_process_metadata/add_process_metadata_test.go b/libbeat/processors/add_process_metadata/add_process_metadata_test.go index 128b70a3d3c5..d74297262cb2 100644 --- a/libbeat/processors/add_process_metadata/add_process_metadata_test.go +++ b/libbeat/processors/add_process_metadata/add_process_metadata_test.go @@ -54,6 +54,22 @@ func newCGHandlerBuilder(handler testCGRsolver) processors.InitCgroupHandler { } } +func TestNilProcessor(t *testing.T) { + initCgroupPaths = func(rootfsMountpoint resolve.Resolver, ignoreRootCgroups bool) (processors.CGReader, error) { + return &processors.NilCGReader{}, nil + } + + proc, err := 
newProcessMetadataProcessorWithProvider(defaultConfig(), &procCache, false) + require.NoError(t, err) + + // make sure a nil cgroup reader doesn't blow anything up + unwrapped, _ := proc.(*addProcessMetadata) + metadata, err := unwrapped.provider.GetProcessMetadata(os.Getpid()) + require.NoError(t, err) + require.NotNil(t, metadata) + +} + func TestDefaultProcessorStartup(t *testing.T) { // set initCgroupPaths to system non-test defaults initCgroupPaths = func(rootfsMountpoint resolve.Resolver, ignoreRootCgroups bool) (processors.CGReader, error) { @@ -67,7 +83,7 @@ func TestDefaultProcessorStartup(t *testing.T) { unwrapped, _ := proc.(*addProcessMetadata) metadata, err := unwrapped.provider.GetProcessMetadata(os.Getpid()) require.NoError(t, err) - require.NotNil(t, metadata) + require.NotNil(t, metadata.fields) } func TestAddProcessMetadata(t *testing.T) { diff --git a/libbeat/processors/add_process_metadata/gosigar_cid_provider.go b/libbeat/processors/add_process_metadata/gosigar_cid_provider.go index d1f09c5fd855..bd16e62f9666 100644 --- a/libbeat/processors/add_process_metadata/gosigar_cid_provider.go +++ b/libbeat/processors/add_process_metadata/gosigar_cid_provider.go @@ -55,7 +55,7 @@ func (p gosigarCidProvider) GetCid(pid int) (result string, err error) { cgroups, err := p.getProcessCgroups(pid) if err != nil { - p.log.Debugf("failed to get cgroups for pid=%v: %v", pid, err) + return "", fmt.Errorf("failed to get cgroups for pid=%v: %w", pid, err) } cid = p.getContainerID(cgroups) @@ -81,9 +81,6 @@ func newCidProvider(cgroupPrefixes []string, cgroupRegex *regexp.Regexp, process // returns an error if it failed to retrieve the cgroup info. 
func (p gosigarCidProvider) getProcessCgroups(pid int) (cgroup.PathList, error) { //return nil if we aren't supporting cgroups - if p.processCgroupPaths == nil { - return cgroup.PathList{}, nil - } pathList, err := p.processCgroupPaths.ProcessCgroupPaths(pid) if err != nil { return cgroup.PathList{}, fmt.Errorf("failed to read cgroups for pid=%v: %w", pid, err) diff --git a/libbeat/processors/cgroups.go b/libbeat/processors/cgroups.go index 8e54ae5535bf..25d67fbe41a7 100644 --- a/libbeat/processors/cgroups.go +++ b/libbeat/processors/cgroups.go @@ -18,6 +18,8 @@ package processors import ( + "io/fs" + "github.com/elastic/elastic-agent-system-metrics/metric/system/cgroup" "github.com/elastic/elastic-agent-system-metrics/metric/system/resolve" ) @@ -30,3 +32,12 @@ type InitCgroupHandler = func(rootfsMountpoint resolve.Resolver, ignoreRootCgrou type CGReader interface { ProcessCgroupPaths(pid int) (cgroup.PathList, error) } + +// NilCGReader does nothing +type NilCGReader struct { +} + +// ProcessCgroupPaths returns a blank pathLists and fs.ErrNotExist +func (*NilCGReader) ProcessCgroupPaths(_ int) (cgroup.PathList, error) { + return cgroup.PathList{}, fs.ErrNotExist +} From 23f14f1f18f13d4eaacff1f7f4d73b829ae5c111 Mon Sep 17 00:00:00 2001 From: kruskall <99559985+kruskall@users.noreply.github.com> Date: Fri, 11 Oct 2024 20:46:46 +0200 Subject: [PATCH 27/90] feat(metricbeat): initial windows arm support (#41039) resolve some of the compile errors on windows arm the remaining compile errors will be fixed once elastic/elastic-agent-system-metrics is bumped --- .../windows/pdh/defs_pdh_windows_arm.go | 217 ++++++++++++++++++ .../windows/pdh/defs_pdh_windows_arm64.go | 217 ++++++++++++++++++ metricbeat/helper/windows/pdh/doc.go | 4 +- .../service/defs_service_windows_arm.go | 206 +++++++++++++++++ .../service/defs_service_windows_arm64.go | 209 +++++++++++++++++ metricbeat/module/windows/service/doc.go | 4 +- 6 files changed, 855 insertions(+), 2 deletions(-) create mode 
100644 metricbeat/helper/windows/pdh/defs_pdh_windows_arm.go create mode 100644 metricbeat/helper/windows/pdh/defs_pdh_windows_arm64.go create mode 100644 metricbeat/module/windows/service/defs_service_windows_arm.go create mode 100644 metricbeat/module/windows/service/defs_service_windows_arm64.go diff --git a/metricbeat/helper/windows/pdh/defs_pdh_windows_arm.go b/metricbeat/helper/windows/pdh/defs_pdh_windows_arm.go new file mode 100644 index 000000000000..e794050dcf55 --- /dev/null +++ b/metricbeat/helper/windows/pdh/defs_pdh_windows_arm.go @@ -0,0 +1,217 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo.exe -godefs defs_pdh_windows.go + +package pdh + +type PdhErrno uintptr + +const ( + PDH_CSTATUS_VALID_DATA PdhErrno = 0x0 + PDH_CSTATUS_NEW_DATA PdhErrno = 0x1 + PDH_CSTATUS_NO_MACHINE PdhErrno = 0x800007d0 + PDH_CSTATUS_NO_INSTANCE PdhErrno = 0x800007d1 + PDH_MORE_DATA PdhErrno = 0x800007d2 + PDH_CSTATUS_ITEM_NOT_VALIDATED PdhErrno = 0x800007d3 + PDH_RETRY PdhErrno = 0x800007d4 + PDH_NO_DATA PdhErrno = 0x800007d5 + PDH_CALC_NEGATIVE_DENOMINATOR PdhErrno = 0x800007d6 + PDH_CALC_NEGATIVE_TIMEBASE PdhErrno = 0x800007d7 + PDH_CALC_NEGATIVE_VALUE PdhErrno = 0x800007d8 + PDH_DIALOG_CANCELLED PdhErrno = 0x800007d9 + PDH_END_OF_LOG_FILE PdhErrno = 0x800007da + PDH_ASYNC_QUERY_TIMEOUT PdhErrno = 0x800007db + PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE PdhErrno = 0x800007dc + PDH_CSTATUS_NO_OBJECT PdhErrno = 0xc0000bb8 + PDH_CSTATUS_NO_COUNTER PdhErrno = 0xc0000bb9 + PDH_CSTATUS_INVALID_DATA PdhErrno = 0xc0000bba + PDH_MEMORY_ALLOCATION_FAILURE PdhErrno = 0xc0000bbb + PDH_INVALID_HANDLE PdhErrno = 0xc0000bbc + PDH_INVALID_ARGUMENT PdhErrno = 0xc0000bbd + PDH_FUNCTION_NOT_FOUND PdhErrno = 0xc0000bbe + PDH_CSTATUS_NO_COUNTERNAME PdhErrno = 0xc0000bbf + PDH_CSTATUS_BAD_COUNTERNAME PdhErrno = 0xc0000bc0 + PDH_INVALID_BUFFER PdhErrno = 0xc0000bc1 + PDH_INSUFFICIENT_BUFFER PdhErrno = 0xc0000bc2 + PDH_CANNOT_CONNECT_MACHINE PdhErrno = 0xc0000bc3 + PDH_INVALID_PATH PdhErrno = 0xc0000bc4 + PDH_INVALID_INSTANCE PdhErrno = 0xc0000bc5 + PDH_INVALID_DATA PdhErrno = 0xc0000bc6 + PDH_NO_DIALOG_DATA PdhErrno = 0xc0000bc7 + PDH_CANNOT_READ_NAME_STRINGS PdhErrno = 0xc0000bc8 + PDH_LOG_FILE_CREATE_ERROR PdhErrno = 0xc0000bc9 + PDH_LOG_FILE_OPEN_ERROR PdhErrno = 0xc0000bca + PDH_LOG_TYPE_NOT_FOUND PdhErrno = 0xc0000bcb + PDH_NO_MORE_DATA PdhErrno = 0xc0000bcc + PDH_ENTRY_NOT_IN_LOG_FILE PdhErrno = 0xc0000bcd + PDH_DATA_SOURCE_IS_LOG_FILE PdhErrno = 0xc0000bce + PDH_DATA_SOURCE_IS_REAL_TIME PdhErrno = 0xc0000bcf + PDH_UNABLE_READ_LOG_HEADER PdhErrno = 0xc0000bd0 + PDH_FILE_NOT_FOUND 
PdhErrno = 0xc0000bd1 + PDH_FILE_ALREADY_EXISTS PdhErrno = 0xc0000bd2 + PDH_NOT_IMPLEMENTED PdhErrno = 0xc0000bd3 + PDH_STRING_NOT_FOUND PdhErrno = 0xc0000bd4 + PDH_UNABLE_MAP_NAME_FILES PdhErrno = 0x80000bd5 + PDH_UNKNOWN_LOG_FORMAT PdhErrno = 0xc0000bd6 + PDH_UNKNOWN_LOGSVC_COMMAND PdhErrno = 0xc0000bd7 + PDH_LOGSVC_QUERY_NOT_FOUND PdhErrno = 0xc0000bd8 + PDH_LOGSVC_NOT_OPENED PdhErrno = 0xc0000bd9 + PDH_WBEM_ERROR PdhErrno = 0xc0000bda + PDH_ACCESS_DENIED PdhErrno = 0xc0000bdb + PDH_LOG_FILE_TOO_SMALL PdhErrno = 0xc0000bdc + PDH_INVALID_DATASOURCE PdhErrno = 0xc0000bdd + PDH_INVALID_SQLDB PdhErrno = 0xc0000bde + PDH_NO_COUNTERS PdhErrno = 0xc0000bdf + PDH_SQL_ALLOC_FAILED PdhErrno = 0xc0000be0 + PDH_SQL_ALLOCCON_FAILED PdhErrno = 0xc0000be1 + PDH_SQL_EXEC_DIRECT_FAILED PdhErrno = 0xc0000be2 + PDH_SQL_FETCH_FAILED PdhErrno = 0xc0000be3 + PDH_SQL_ROWCOUNT_FAILED PdhErrno = 0xc0000be4 + PDH_SQL_MORE_RESULTS_FAILED PdhErrno = 0xc0000be5 + PDH_SQL_CONNECT_FAILED PdhErrno = 0xc0000be6 + PDH_SQL_BIND_FAILED PdhErrno = 0xc0000be7 + PDH_CANNOT_CONNECT_WMI_SERVER PdhErrno = 0xc0000be8 + PDH_PLA_COLLECTION_ALREADY_RUNNING PdhErrno = 0xc0000be9 + PDH_PLA_ERROR_SCHEDULE_OVERLAP PdhErrno = 0xc0000bea + PDH_PLA_COLLECTION_NOT_FOUND PdhErrno = 0xc0000beb + PDH_PLA_ERROR_SCHEDULE_ELAPSED PdhErrno = 0xc0000bec + PDH_PLA_ERROR_NOSTART PdhErrno = 0xc0000bed + PDH_PLA_ERROR_ALREADY_EXISTS PdhErrno = 0xc0000bee + PDH_PLA_ERROR_TYPE_MISMATCH PdhErrno = 0xc0000bef + PDH_PLA_ERROR_FILEPATH PdhErrno = 0xc0000bf0 + PDH_PLA_SERVICE_ERROR PdhErrno = 0xc0000bf1 + PDH_PLA_VALIDATION_ERROR PdhErrno = 0xc0000bf2 + PDH_PLA_VALIDATION_WARNING PdhErrno = 0x80000bf3 + PDH_PLA_ERROR_NAME_TOO_LONG PdhErrno = 0xc0000bf4 + PDH_INVALID_SQL_LOG_FORMAT PdhErrno = 0xc0000bf5 + PDH_COUNTER_ALREADY_IN_QUERY PdhErrno = 0xc0000bf6 + PDH_BINARY_LOG_CORRUPT PdhErrno = 0xc0000bf7 + PDH_LOG_SAMPLE_TOO_SMALL PdhErrno = 0xc0000bf8 + PDH_OS_LATER_VERSION PdhErrno = 0xc0000bf9 + PDH_OS_EARLIER_VERSION PdhErrno = 
0xc0000bfa + PDH_INCORRECT_APPEND_TIME PdhErrno = 0xc0000bfb + PDH_UNMATCHED_APPEND_COUNTER PdhErrno = 0xc0000bfc + PDH_SQL_ALTER_DETAIL_FAILED PdhErrno = 0xc0000bfd + PDH_QUERY_PERF_DATA_TIMEOUT PdhErrno = 0xc0000bfe +) + +var pdhErrors = map[PdhErrno]struct{}{ + PDH_CSTATUS_VALID_DATA: struct{}{}, + PDH_CSTATUS_NEW_DATA: struct{}{}, + PDH_CSTATUS_NO_MACHINE: struct{}{}, + PDH_CSTATUS_NO_INSTANCE: struct{}{}, + PDH_MORE_DATA: struct{}{}, + PDH_CSTATUS_ITEM_NOT_VALIDATED: struct{}{}, + PDH_RETRY: struct{}{}, + PDH_NO_DATA: struct{}{}, + PDH_CALC_NEGATIVE_DENOMINATOR: struct{}{}, + PDH_CALC_NEGATIVE_TIMEBASE: struct{}{}, + PDH_CALC_NEGATIVE_VALUE: struct{}{}, + PDH_DIALOG_CANCELLED: struct{}{}, + PDH_END_OF_LOG_FILE: struct{}{}, + PDH_ASYNC_QUERY_TIMEOUT: struct{}{}, + PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE: struct{}{}, + PDH_CSTATUS_NO_OBJECT: struct{}{}, + PDH_CSTATUS_NO_COUNTER: struct{}{}, + PDH_CSTATUS_INVALID_DATA: struct{}{}, + PDH_MEMORY_ALLOCATION_FAILURE: struct{}{}, + PDH_INVALID_HANDLE: struct{}{}, + PDH_INVALID_ARGUMENT: struct{}{}, + PDH_FUNCTION_NOT_FOUND: struct{}{}, + PDH_CSTATUS_NO_COUNTERNAME: struct{}{}, + PDH_CSTATUS_BAD_COUNTERNAME: struct{}{}, + PDH_INVALID_BUFFER: struct{}{}, + PDH_INSUFFICIENT_BUFFER: struct{}{}, + PDH_CANNOT_CONNECT_MACHINE: struct{}{}, + PDH_INVALID_PATH: struct{}{}, + PDH_INVALID_INSTANCE: struct{}{}, + PDH_INVALID_DATA: struct{}{}, + PDH_NO_DIALOG_DATA: struct{}{}, + PDH_CANNOT_READ_NAME_STRINGS: struct{}{}, + PDH_LOG_FILE_CREATE_ERROR: struct{}{}, + PDH_LOG_FILE_OPEN_ERROR: struct{}{}, + PDH_LOG_TYPE_NOT_FOUND: struct{}{}, + PDH_NO_MORE_DATA: struct{}{}, + PDH_ENTRY_NOT_IN_LOG_FILE: struct{}{}, + PDH_DATA_SOURCE_IS_LOG_FILE: struct{}{}, + PDH_DATA_SOURCE_IS_REAL_TIME: struct{}{}, + PDH_UNABLE_READ_LOG_HEADER: struct{}{}, + PDH_FILE_NOT_FOUND: struct{}{}, + PDH_FILE_ALREADY_EXISTS: struct{}{}, + PDH_NOT_IMPLEMENTED: struct{}{}, + PDH_STRING_NOT_FOUND: struct{}{}, + PDH_UNABLE_MAP_NAME_FILES: struct{}{}, + 
PDH_UNKNOWN_LOG_FORMAT: struct{}{}, + PDH_UNKNOWN_LOGSVC_COMMAND: struct{}{}, + PDH_LOGSVC_QUERY_NOT_FOUND: struct{}{}, + PDH_LOGSVC_NOT_OPENED: struct{}{}, + PDH_WBEM_ERROR: struct{}{}, + PDH_ACCESS_DENIED: struct{}{}, + PDH_LOG_FILE_TOO_SMALL: struct{}{}, + PDH_INVALID_DATASOURCE: struct{}{}, + PDH_INVALID_SQLDB: struct{}{}, + PDH_NO_COUNTERS: struct{}{}, + PDH_SQL_ALLOC_FAILED: struct{}{}, + PDH_SQL_ALLOCCON_FAILED: struct{}{}, + PDH_SQL_EXEC_DIRECT_FAILED: struct{}{}, + PDH_SQL_FETCH_FAILED: struct{}{}, + PDH_SQL_ROWCOUNT_FAILED: struct{}{}, + PDH_SQL_MORE_RESULTS_FAILED: struct{}{}, + PDH_SQL_CONNECT_FAILED: struct{}{}, + PDH_SQL_BIND_FAILED: struct{}{}, + PDH_CANNOT_CONNECT_WMI_SERVER: struct{}{}, + PDH_PLA_COLLECTION_ALREADY_RUNNING: struct{}{}, + PDH_PLA_ERROR_SCHEDULE_OVERLAP: struct{}{}, + PDH_PLA_COLLECTION_NOT_FOUND: struct{}{}, + PDH_PLA_ERROR_SCHEDULE_ELAPSED: struct{}{}, + PDH_PLA_ERROR_NOSTART: struct{}{}, + PDH_PLA_ERROR_ALREADY_EXISTS: struct{}{}, + PDH_PLA_ERROR_TYPE_MISMATCH: struct{}{}, + PDH_PLA_ERROR_FILEPATH: struct{}{}, + PDH_PLA_SERVICE_ERROR: struct{}{}, + PDH_PLA_VALIDATION_ERROR: struct{}{}, + PDH_PLA_VALIDATION_WARNING: struct{}{}, + PDH_PLA_ERROR_NAME_TOO_LONG: struct{}{}, + PDH_INVALID_SQL_LOG_FORMAT: struct{}{}, + PDH_COUNTER_ALREADY_IN_QUERY: struct{}{}, + PDH_BINARY_LOG_CORRUPT: struct{}{}, + PDH_LOG_SAMPLE_TOO_SMALL: struct{}{}, + PDH_OS_LATER_VERSION: struct{}{}, + PDH_OS_EARLIER_VERSION: struct{}{}, + PDH_INCORRECT_APPEND_TIME: struct{}{}, + PDH_UNMATCHED_APPEND_COUNTER: struct{}{}, + PDH_SQL_ALTER_DETAIL_FAILED: struct{}{}, + PDH_QUERY_PERF_DATA_TIMEOUT: struct{}{}, +} + +type PdhCounterFormat uint32 + +const ( + PdhFmtDouble PdhCounterFormat = 0x200 + + PdhFmtLarge PdhCounterFormat = 0x400 + + PdhFmtLong PdhCounterFormat = 0x100 + + PdhFmtNoScale PdhCounterFormat = 0x1000 + + PdhFmtNoCap100 PdhCounterFormat = 0x8000 + + PdhFmtMultiply1000 PdhCounterFormat = 0x2000 +) diff --git 
a/metricbeat/helper/windows/pdh/defs_pdh_windows_arm64.go b/metricbeat/helper/windows/pdh/defs_pdh_windows_arm64.go new file mode 100644 index 000000000000..e794050dcf55 --- /dev/null +++ b/metricbeat/helper/windows/pdh/defs_pdh_windows_arm64.go @@ -0,0 +1,217 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo.exe -godefs defs_pdh_windows.go + +package pdh + +type PdhErrno uintptr + +const ( + PDH_CSTATUS_VALID_DATA PdhErrno = 0x0 + PDH_CSTATUS_NEW_DATA PdhErrno = 0x1 + PDH_CSTATUS_NO_MACHINE PdhErrno = 0x800007d0 + PDH_CSTATUS_NO_INSTANCE PdhErrno = 0x800007d1 + PDH_MORE_DATA PdhErrno = 0x800007d2 + PDH_CSTATUS_ITEM_NOT_VALIDATED PdhErrno = 0x800007d3 + PDH_RETRY PdhErrno = 0x800007d4 + PDH_NO_DATA PdhErrno = 0x800007d5 + PDH_CALC_NEGATIVE_DENOMINATOR PdhErrno = 0x800007d6 + PDH_CALC_NEGATIVE_TIMEBASE PdhErrno = 0x800007d7 + PDH_CALC_NEGATIVE_VALUE PdhErrno = 0x800007d8 + PDH_DIALOG_CANCELLED PdhErrno = 0x800007d9 + PDH_END_OF_LOG_FILE PdhErrno = 0x800007da + PDH_ASYNC_QUERY_TIMEOUT PdhErrno = 0x800007db + PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE PdhErrno = 0x800007dc + PDH_CSTATUS_NO_OBJECT PdhErrno = 0xc0000bb8 + PDH_CSTATUS_NO_COUNTER PdhErrno = 0xc0000bb9 + PDH_CSTATUS_INVALID_DATA PdhErrno = 0xc0000bba + PDH_MEMORY_ALLOCATION_FAILURE PdhErrno = 0xc0000bbb + PDH_INVALID_HANDLE PdhErrno = 0xc0000bbc + PDH_INVALID_ARGUMENT PdhErrno = 0xc0000bbd + PDH_FUNCTION_NOT_FOUND PdhErrno = 0xc0000bbe + PDH_CSTATUS_NO_COUNTERNAME PdhErrno = 0xc0000bbf + PDH_CSTATUS_BAD_COUNTERNAME PdhErrno = 0xc0000bc0 + PDH_INVALID_BUFFER PdhErrno = 0xc0000bc1 + PDH_INSUFFICIENT_BUFFER PdhErrno = 0xc0000bc2 + PDH_CANNOT_CONNECT_MACHINE PdhErrno = 0xc0000bc3 + PDH_INVALID_PATH PdhErrno = 0xc0000bc4 + PDH_INVALID_INSTANCE PdhErrno = 0xc0000bc5 + PDH_INVALID_DATA PdhErrno = 0xc0000bc6 + PDH_NO_DIALOG_DATA PdhErrno = 0xc0000bc7 + PDH_CANNOT_READ_NAME_STRINGS PdhErrno = 0xc0000bc8 + PDH_LOG_FILE_CREATE_ERROR PdhErrno = 0xc0000bc9 + PDH_LOG_FILE_OPEN_ERROR PdhErrno = 0xc0000bca + PDH_LOG_TYPE_NOT_FOUND PdhErrno = 0xc0000bcb + PDH_NO_MORE_DATA PdhErrno = 0xc0000bcc + PDH_ENTRY_NOT_IN_LOG_FILE PdhErrno = 0xc0000bcd + PDH_DATA_SOURCE_IS_LOG_FILE PdhErrno = 0xc0000bce + PDH_DATA_SOURCE_IS_REAL_TIME PdhErrno = 0xc0000bcf + PDH_UNABLE_READ_LOG_HEADER PdhErrno = 0xc0000bd0 + PDH_FILE_NOT_FOUND 
PdhErrno = 0xc0000bd1 + PDH_FILE_ALREADY_EXISTS PdhErrno = 0xc0000bd2 + PDH_NOT_IMPLEMENTED PdhErrno = 0xc0000bd3 + PDH_STRING_NOT_FOUND PdhErrno = 0xc0000bd4 + PDH_UNABLE_MAP_NAME_FILES PdhErrno = 0x80000bd5 + PDH_UNKNOWN_LOG_FORMAT PdhErrno = 0xc0000bd6 + PDH_UNKNOWN_LOGSVC_COMMAND PdhErrno = 0xc0000bd7 + PDH_LOGSVC_QUERY_NOT_FOUND PdhErrno = 0xc0000bd8 + PDH_LOGSVC_NOT_OPENED PdhErrno = 0xc0000bd9 + PDH_WBEM_ERROR PdhErrno = 0xc0000bda + PDH_ACCESS_DENIED PdhErrno = 0xc0000bdb + PDH_LOG_FILE_TOO_SMALL PdhErrno = 0xc0000bdc + PDH_INVALID_DATASOURCE PdhErrno = 0xc0000bdd + PDH_INVALID_SQLDB PdhErrno = 0xc0000bde + PDH_NO_COUNTERS PdhErrno = 0xc0000bdf + PDH_SQL_ALLOC_FAILED PdhErrno = 0xc0000be0 + PDH_SQL_ALLOCCON_FAILED PdhErrno = 0xc0000be1 + PDH_SQL_EXEC_DIRECT_FAILED PdhErrno = 0xc0000be2 + PDH_SQL_FETCH_FAILED PdhErrno = 0xc0000be3 + PDH_SQL_ROWCOUNT_FAILED PdhErrno = 0xc0000be4 + PDH_SQL_MORE_RESULTS_FAILED PdhErrno = 0xc0000be5 + PDH_SQL_CONNECT_FAILED PdhErrno = 0xc0000be6 + PDH_SQL_BIND_FAILED PdhErrno = 0xc0000be7 + PDH_CANNOT_CONNECT_WMI_SERVER PdhErrno = 0xc0000be8 + PDH_PLA_COLLECTION_ALREADY_RUNNING PdhErrno = 0xc0000be9 + PDH_PLA_ERROR_SCHEDULE_OVERLAP PdhErrno = 0xc0000bea + PDH_PLA_COLLECTION_NOT_FOUND PdhErrno = 0xc0000beb + PDH_PLA_ERROR_SCHEDULE_ELAPSED PdhErrno = 0xc0000bec + PDH_PLA_ERROR_NOSTART PdhErrno = 0xc0000bed + PDH_PLA_ERROR_ALREADY_EXISTS PdhErrno = 0xc0000bee + PDH_PLA_ERROR_TYPE_MISMATCH PdhErrno = 0xc0000bef + PDH_PLA_ERROR_FILEPATH PdhErrno = 0xc0000bf0 + PDH_PLA_SERVICE_ERROR PdhErrno = 0xc0000bf1 + PDH_PLA_VALIDATION_ERROR PdhErrno = 0xc0000bf2 + PDH_PLA_VALIDATION_WARNING PdhErrno = 0x80000bf3 + PDH_PLA_ERROR_NAME_TOO_LONG PdhErrno = 0xc0000bf4 + PDH_INVALID_SQL_LOG_FORMAT PdhErrno = 0xc0000bf5 + PDH_COUNTER_ALREADY_IN_QUERY PdhErrno = 0xc0000bf6 + PDH_BINARY_LOG_CORRUPT PdhErrno = 0xc0000bf7 + PDH_LOG_SAMPLE_TOO_SMALL PdhErrno = 0xc0000bf8 + PDH_OS_LATER_VERSION PdhErrno = 0xc0000bf9 + PDH_OS_EARLIER_VERSION PdhErrno = 
0xc0000bfa + PDH_INCORRECT_APPEND_TIME PdhErrno = 0xc0000bfb + PDH_UNMATCHED_APPEND_COUNTER PdhErrno = 0xc0000bfc + PDH_SQL_ALTER_DETAIL_FAILED PdhErrno = 0xc0000bfd + PDH_QUERY_PERF_DATA_TIMEOUT PdhErrno = 0xc0000bfe +) + +var pdhErrors = map[PdhErrno]struct{}{ + PDH_CSTATUS_VALID_DATA: struct{}{}, + PDH_CSTATUS_NEW_DATA: struct{}{}, + PDH_CSTATUS_NO_MACHINE: struct{}{}, + PDH_CSTATUS_NO_INSTANCE: struct{}{}, + PDH_MORE_DATA: struct{}{}, + PDH_CSTATUS_ITEM_NOT_VALIDATED: struct{}{}, + PDH_RETRY: struct{}{}, + PDH_NO_DATA: struct{}{}, + PDH_CALC_NEGATIVE_DENOMINATOR: struct{}{}, + PDH_CALC_NEGATIVE_TIMEBASE: struct{}{}, + PDH_CALC_NEGATIVE_VALUE: struct{}{}, + PDH_DIALOG_CANCELLED: struct{}{}, + PDH_END_OF_LOG_FILE: struct{}{}, + PDH_ASYNC_QUERY_TIMEOUT: struct{}{}, + PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE: struct{}{}, + PDH_CSTATUS_NO_OBJECT: struct{}{}, + PDH_CSTATUS_NO_COUNTER: struct{}{}, + PDH_CSTATUS_INVALID_DATA: struct{}{}, + PDH_MEMORY_ALLOCATION_FAILURE: struct{}{}, + PDH_INVALID_HANDLE: struct{}{}, + PDH_INVALID_ARGUMENT: struct{}{}, + PDH_FUNCTION_NOT_FOUND: struct{}{}, + PDH_CSTATUS_NO_COUNTERNAME: struct{}{}, + PDH_CSTATUS_BAD_COUNTERNAME: struct{}{}, + PDH_INVALID_BUFFER: struct{}{}, + PDH_INSUFFICIENT_BUFFER: struct{}{}, + PDH_CANNOT_CONNECT_MACHINE: struct{}{}, + PDH_INVALID_PATH: struct{}{}, + PDH_INVALID_INSTANCE: struct{}{}, + PDH_INVALID_DATA: struct{}{}, + PDH_NO_DIALOG_DATA: struct{}{}, + PDH_CANNOT_READ_NAME_STRINGS: struct{}{}, + PDH_LOG_FILE_CREATE_ERROR: struct{}{}, + PDH_LOG_FILE_OPEN_ERROR: struct{}{}, + PDH_LOG_TYPE_NOT_FOUND: struct{}{}, + PDH_NO_MORE_DATA: struct{}{}, + PDH_ENTRY_NOT_IN_LOG_FILE: struct{}{}, + PDH_DATA_SOURCE_IS_LOG_FILE: struct{}{}, + PDH_DATA_SOURCE_IS_REAL_TIME: struct{}{}, + PDH_UNABLE_READ_LOG_HEADER: struct{}{}, + PDH_FILE_NOT_FOUND: struct{}{}, + PDH_FILE_ALREADY_EXISTS: struct{}{}, + PDH_NOT_IMPLEMENTED: struct{}{}, + PDH_STRING_NOT_FOUND: struct{}{}, + PDH_UNABLE_MAP_NAME_FILES: struct{}{}, + 
PDH_UNKNOWN_LOG_FORMAT: struct{}{}, + PDH_UNKNOWN_LOGSVC_COMMAND: struct{}{}, + PDH_LOGSVC_QUERY_NOT_FOUND: struct{}{}, + PDH_LOGSVC_NOT_OPENED: struct{}{}, + PDH_WBEM_ERROR: struct{}{}, + PDH_ACCESS_DENIED: struct{}{}, + PDH_LOG_FILE_TOO_SMALL: struct{}{}, + PDH_INVALID_DATASOURCE: struct{}{}, + PDH_INVALID_SQLDB: struct{}{}, + PDH_NO_COUNTERS: struct{}{}, + PDH_SQL_ALLOC_FAILED: struct{}{}, + PDH_SQL_ALLOCCON_FAILED: struct{}{}, + PDH_SQL_EXEC_DIRECT_FAILED: struct{}{}, + PDH_SQL_FETCH_FAILED: struct{}{}, + PDH_SQL_ROWCOUNT_FAILED: struct{}{}, + PDH_SQL_MORE_RESULTS_FAILED: struct{}{}, + PDH_SQL_CONNECT_FAILED: struct{}{}, + PDH_SQL_BIND_FAILED: struct{}{}, + PDH_CANNOT_CONNECT_WMI_SERVER: struct{}{}, + PDH_PLA_COLLECTION_ALREADY_RUNNING: struct{}{}, + PDH_PLA_ERROR_SCHEDULE_OVERLAP: struct{}{}, + PDH_PLA_COLLECTION_NOT_FOUND: struct{}{}, + PDH_PLA_ERROR_SCHEDULE_ELAPSED: struct{}{}, + PDH_PLA_ERROR_NOSTART: struct{}{}, + PDH_PLA_ERROR_ALREADY_EXISTS: struct{}{}, + PDH_PLA_ERROR_TYPE_MISMATCH: struct{}{}, + PDH_PLA_ERROR_FILEPATH: struct{}{}, + PDH_PLA_SERVICE_ERROR: struct{}{}, + PDH_PLA_VALIDATION_ERROR: struct{}{}, + PDH_PLA_VALIDATION_WARNING: struct{}{}, + PDH_PLA_ERROR_NAME_TOO_LONG: struct{}{}, + PDH_INVALID_SQL_LOG_FORMAT: struct{}{}, + PDH_COUNTER_ALREADY_IN_QUERY: struct{}{}, + PDH_BINARY_LOG_CORRUPT: struct{}{}, + PDH_LOG_SAMPLE_TOO_SMALL: struct{}{}, + PDH_OS_LATER_VERSION: struct{}{}, + PDH_OS_EARLIER_VERSION: struct{}{}, + PDH_INCORRECT_APPEND_TIME: struct{}{}, + PDH_UNMATCHED_APPEND_COUNTER: struct{}{}, + PDH_SQL_ALTER_DETAIL_FAILED: struct{}{}, + PDH_QUERY_PERF_DATA_TIMEOUT: struct{}{}, +} + +type PdhCounterFormat uint32 + +const ( + PdhFmtDouble PdhCounterFormat = 0x200 + + PdhFmtLarge PdhCounterFormat = 0x400 + + PdhFmtLong PdhCounterFormat = 0x100 + + PdhFmtNoScale PdhCounterFormat = 0x1000 + + PdhFmtNoCap100 PdhCounterFormat = 0x8000 + + PdhFmtMultiply1000 PdhCounterFormat = 0x2000 +) diff --git a/metricbeat/helper/windows/pdh/doc.go 
b/metricbeat/helper/windows/pdh/doc.go index 736bde4bec9a..71f1b10067aa 100644 --- a/metricbeat/helper/windows/pdh/doc.go +++ b/metricbeat/helper/windows/pdh/doc.go @@ -20,5 +20,7 @@ package pdh //go:generate go run mkpdh_defs.go //go:generate go run ../run.go -cmd "go tool cgo -godefs defs_pdh_windows.go" -goarch amd64 -output defs_pdh_windows_amd64.go //go:generate go run ../run.go -cmd "go tool cgo -godefs defs_pdh_windows.go" -goarch 386 -output defs_pdh_windows_386.go +//go:generate go run ../run.go -cmd "go tool cgo -godefs defs_pdh_windows.go" -goarch arm64 -output defs_pdh_windows_arm64.go +//go:generate go run ../run.go -cmd "go tool cgo -godefs defs_pdh_windows.go" -goarch arm -output defs_pdh_windows_arm.go //go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zpdh_windows.go pdh_windows.go -//go:generate goimports -w defs_pdh_windows_amd64.go defs_pdh_windows_386.go zpdh_windows.go +//go:generate goimports -w defs_pdh_windows_amd64.go defs_pdh_windows_386.go defs_pdh_windows_arm64.go defs_pdh_windows_arm.go zpdh_windows.go diff --git a/metricbeat/module/windows/service/defs_service_windows_arm.go b/metricbeat/module/windows/service/defs_service_windows_arm.go new file mode 100644 index 000000000000..ebf58d8d4bde --- /dev/null +++ b/metricbeat/module/windows/service/defs_service_windows_arm.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Created by cgo -godefs - DO NOT EDIT +// cgo.exe -godefs defs_service_windows.go + +package service + +type ServiceErrno uintptr + +const ( + SERVICE_ERROR_ACCESS_DENIED ServiceErrno = 0x5 + SERVICE_ERROR_MORE_DATA ServiceErrno = 0xea + SERVICE_ERROR_INVALID_PARAMETER ServiceErrno = 0x57 + SERVICE_ERROR_INVALID_HANDLE ServiceErrno = 0x6 + SERVICE_ERROR_INVALID_LEVEL ServiceErrno = 0x7c + SERVICE_ERROR_INVALID_NAME ServiceErrno = 0x7b + SERVICE_ERROR_SHUTDOWN_IN_PROGRESS ServiceErrno = 0x45b + SERVICE_ERROR_DATABASE_DOES_NOT_EXIST ServiceErrno = 0x429 + SERVICE_ERROR_INSUFFICIENT_BUFFER ServiceErrno = 0x7a + SERVICE_ERROR_SERVICE_DOES_NOT_EXIST ServiceErrno = 0x424 +) + +type ServiceErrorControl uint32 + +const ( + SERVICE_ERROR_CRITICAL ServiceErrno = 0x3 + SERVICE_ERROR_IGNORE ServiceErrno = 0x0 + SERVICE_ERROR_NORMAL ServiceErrno = 0x1 + SERVICE_ERROR_SEVERE ServiceErrno = 0x2 +) + +var serviceErrors = map[ServiceErrno]struct{}{ + SERVICE_ERROR_ACCESS_DENIED: struct{}{}, + SERVICE_ERROR_MORE_DATA: struct{}{}, + SERVICE_ERROR_INVALID_PARAMETER: struct{}{}, + SERVICE_ERROR_INVALID_HANDLE: struct{}{}, + SERVICE_ERROR_INVALID_LEVEL: struct{}{}, + SERVICE_ERROR_INVALID_NAME: struct{}{}, + SERVICE_ERROR_SHUTDOWN_IN_PROGRESS: struct{}{}, + SERVICE_ERROR_DATABASE_DOES_NOT_EXIST: struct{}{}, + SERVICE_ERROR_INSUFFICIENT_BUFFER: struct{}{}, + SERVICE_ERROR_CRITICAL: struct{}{}, + SERVICE_ERROR_IGNORE: struct{}{}, + SERVICE_ERROR_NORMAL: struct{}{}, + SERVICE_ERROR_SEVERE: struct{}{}, + SERVICE_ERROR_SERVICE_DOES_NOT_EXIST: struct{}{}, +} + +type 
ServiceType uint32 + +const ( + ServiceDriver ServiceType = 0xb + + ServiceFileSystemDriver ServiceType = 0x2 + + ServiceKernelDriver ServiceType = 0x1 + + ServiceWin32 ServiceType = 0x30 + + ServiceWin32OwnProcess ServiceType = 0x10 + + ServiceWin32Shareprocess ServiceType = 0x20 + ServiceInteractiveProcess ServiceType = 0x100 +) + +type ServiceState uint32 + +const ( + ServiceContinuePending ServiceState = 0x5 + ServicePausePending ServiceState = 0x6 + ServicePaused ServiceState = 0x7 + ServiceRunning ServiceState = 0x4 + ServiceStartPending ServiceState = 0x2 + ServiceStopPending ServiceState = 0x3 + ServiceStopped ServiceState = 0x1 +) + +type ServiceEnumState uint32 + +const ( + ServiceActive ServiceEnumState = 0x1 + + ServiceInActive ServiceEnumState = 0x2 + + ServiceStateAll ServiceEnumState = 0x3 +) + +type ServiceSCMAccessRight uint32 + +const ( + ScManagerAllAccess ServiceSCMAccessRight = 0xf003f + + ScManagerConnect ServiceSCMAccessRight = 0x1 + + ScManagerEnumerateService ServiceSCMAccessRight = 0x4 + + ScManagerQueryLockStatus ServiceSCMAccessRight = 0x10 +) + +type ServiceAccessRight uint32 + +const ( + ServiceAllAccess ServiceAccessRight = 0xf01ff + + ServiceChangeConfig ServiceAccessRight = 0x2 + + ServiceEnumerateDependents ServiceAccessRight = 0x8 + + ServiceInterrogate ServiceAccessRight = 0x80 + + ServicePauseContinue ServiceAccessRight = 0x40 + + ServiceQueryConfig ServiceAccessRight = 0x1 + + ServiceQueryStatus ServiceAccessRight = 0x4 + + ServiceStart ServiceAccessRight = 0x10 + + ServiceStop ServiceAccessRight = 0x20 + + ServiceUserDefinedControl ServiceAccessRight = 0x100 +) + +type ServiceInfoLevel uint32 + +const ( + ScEnumProcessInfo ServiceInfoLevel = 0x0 +) + +type ServiceStartType uint32 + +const ( + ServiceAutoStart ServiceStartType = 0x2 + + ServiceBootStart ServiceStartType = 0x0 + + ServiceDemandStart ServiceStartType = 0x3 + + ServiceDisabled ServiceStartType = 0x4 + + ServiceSystemStart ServiceStartType = 0x1 +) + +type 
ProcessAccessRight uint32 + +const ( + ProcessAllAccess ProcessAccessRight = 0x1f0fff + ProcessCreateProcess ProcessAccessRight = 0x80 + ProcessCreateThread ProcessAccessRight = 0x2 + ProcessDupHandle ProcessAccessRight = 0x40 + ProcessQueryInformation ProcessAccessRight = 0x400 + ProcessQueryLimitInformation ProcessAccessRight = 0x1000 + ProcessSetInformation ProcessAccessRight = 0x200 + ProcessSetQuota ProcessAccessRight = 0x100 + ProcessSuspendResume ProcessAccessRight = 0x800 + ProcessTerminate ProcessAccessRight = 0x1 + ProcessVmOperation ProcessAccessRight = 0x8 + ProcessVmRead ProcessAccessRight = 0x10 + ProcessVmWrite ProcessAccessRight = 0x20 + ProcessSynchronize ProcessAccessRight = 0x100000 +) + +type ServiceStatusProcess struct { + DwServiceType uint32 + DwCurrentState uint32 + DwControlsAccepted uint32 + DwWin32ExitCode uint32 + DwServiceSpecificExitCode uint32 + DwCheckPoint uint32 + DwWaitHint uint32 + DwProcessId uint32 + DwServiceFlags uint32 +} + +type EnumServiceStatusProcess struct { + LpServiceName *int8 + LpDisplayName *int8 + ServiceStatusProcess ServiceStatusProcess +} + +type QueryServiceConfig struct { + DwServiceType uint32 + DwStartType uint32 + DwErrorControl uint32 + LpBinaryPathName *int8 + LpLoadOrderGroup *int8 + DwTagId uint32 + LpDependencies *int8 + LpServiceStartName *int8 + LpDisplayName *int8 +} diff --git a/metricbeat/module/windows/service/defs_service_windows_arm64.go b/metricbeat/module/windows/service/defs_service_windows_arm64.go new file mode 100644 index 000000000000..9b2b6686d57d --- /dev/null +++ b/metricbeat/module/windows/service/defs_service_windows_arm64.go @@ -0,0 +1,209 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Created by cgo -godefs - DO NOT EDIT +// cgo.exe -godefs defs_service_windows.go + +package service + +type ServiceErrno uintptr + +const ( + SERVICE_ERROR_ACCESS_DENIED ServiceErrno = 0x5 + SERVICE_ERROR_MORE_DATA ServiceErrno = 0xea + SERVICE_ERROR_INVALID_PARAMETER ServiceErrno = 0x57 + SERVICE_ERROR_INVALID_HANDLE ServiceErrno = 0x6 + SERVICE_ERROR_INVALID_LEVEL ServiceErrno = 0x7c + SERVICE_ERROR_INVALID_NAME ServiceErrno = 0x7b + SERVICE_ERROR_SHUTDOWN_IN_PROGRESS ServiceErrno = 0x45b + SERVICE_ERROR_DATABASE_DOES_NOT_EXIST ServiceErrno = 0x429 + SERVICE_ERROR_INSUFFICIENT_BUFFER ServiceErrno = 0x7a + SERVICE_ERROR_SERVICE_DOES_NOT_EXIST ServiceErrno = 0x424 +) + +type ServiceErrorControl uint32 + +const ( + SERVICE_ERROR_CRITICAL ServiceErrno = 0x3 + SERVICE_ERROR_IGNORE ServiceErrno = 0x0 + SERVICE_ERROR_NORMAL ServiceErrno = 0x1 + SERVICE_ERROR_SEVERE ServiceErrno = 0x2 +) + +var serviceErrors = map[ServiceErrno]struct{}{ + SERVICE_ERROR_ACCESS_DENIED: struct{}{}, + SERVICE_ERROR_MORE_DATA: struct{}{}, + SERVICE_ERROR_INVALID_PARAMETER: struct{}{}, + SERVICE_ERROR_INVALID_HANDLE: struct{}{}, + SERVICE_ERROR_INVALID_LEVEL: struct{}{}, + SERVICE_ERROR_INVALID_NAME: struct{}{}, + SERVICE_ERROR_SHUTDOWN_IN_PROGRESS: struct{}{}, + SERVICE_ERROR_DATABASE_DOES_NOT_EXIST: struct{}{}, + SERVICE_ERROR_INSUFFICIENT_BUFFER: struct{}{}, + SERVICE_ERROR_CRITICAL: struct{}{}, + 
SERVICE_ERROR_IGNORE: struct{}{}, + SERVICE_ERROR_NORMAL: struct{}{}, + SERVICE_ERROR_SEVERE: struct{}{}, + SERVICE_ERROR_SERVICE_DOES_NOT_EXIST: struct{}{}, +} + +type ServiceType uint32 + +const ( + ServiceDriver ServiceType = 0xb + + ServiceFileSystemDriver ServiceType = 0x2 + + ServiceKernelDriver ServiceType = 0x1 + + ServiceWin32 ServiceType = 0x30 + + ServiceWin32OwnProcess ServiceType = 0x10 + + ServiceWin32Shareprocess ServiceType = 0x20 + ServiceInteractiveProcess ServiceType = 0x100 +) + +type ServiceState uint32 + +const ( + ServiceContinuePending ServiceState = 0x5 + ServicePausePending ServiceState = 0x6 + ServicePaused ServiceState = 0x7 + ServiceRunning ServiceState = 0x4 + ServiceStartPending ServiceState = 0x2 + ServiceStopPending ServiceState = 0x3 + ServiceStopped ServiceState = 0x1 +) + +type ServiceEnumState uint32 + +const ( + ServiceActive ServiceEnumState = 0x1 + + ServiceInActive ServiceEnumState = 0x2 + + ServiceStateAll ServiceEnumState = 0x3 +) + +type ServiceSCMAccessRight uint32 + +const ( + ScManagerAllAccess ServiceSCMAccessRight = 0xf003f + + ScManagerConnect ServiceSCMAccessRight = 0x1 + + ScManagerEnumerateService ServiceSCMAccessRight = 0x4 + + ScManagerQueryLockStatus ServiceSCMAccessRight = 0x10 +) + +type ServiceAccessRight uint32 + +const ( + ServiceAllAccess ServiceAccessRight = 0xf01ff + + ServiceChangeConfig ServiceAccessRight = 0x2 + + ServiceEnumerateDependents ServiceAccessRight = 0x8 + + ServiceInterrogate ServiceAccessRight = 0x80 + + ServicePauseContinue ServiceAccessRight = 0x40 + + ServiceQueryConfig ServiceAccessRight = 0x1 + + ServiceQueryStatus ServiceAccessRight = 0x4 + + ServiceStart ServiceAccessRight = 0x10 + + ServiceStop ServiceAccessRight = 0x20 + + ServiceUserDefinedControl ServiceAccessRight = 0x100 +) + +type ServiceInfoLevel uint32 + +const ( + ScEnumProcessInfo ServiceInfoLevel = 0x0 +) + +type ServiceStartType uint32 + +const ( + ServiceAutoStart ServiceStartType = 0x2 + + ServiceBootStart 
ServiceStartType = 0x0 + + ServiceDemandStart ServiceStartType = 0x3 + + ServiceDisabled ServiceStartType = 0x4 + + ServiceSystemStart ServiceStartType = 0x1 +) + +type ProcessAccessRight uint32 + +const ( + ProcessAllAccess ProcessAccessRight = 0x1f0fff + ProcessCreateProcess ProcessAccessRight = 0x80 + ProcessCreateThread ProcessAccessRight = 0x2 + ProcessDupHandle ProcessAccessRight = 0x40 + ProcessQueryInformation ProcessAccessRight = 0x400 + ProcessQueryLimitInformation ProcessAccessRight = 0x1000 + ProcessSetInformation ProcessAccessRight = 0x200 + ProcessSetQuota ProcessAccessRight = 0x100 + ProcessSuspendResume ProcessAccessRight = 0x800 + ProcessTerminate ProcessAccessRight = 0x1 + ProcessVmOperation ProcessAccessRight = 0x8 + ProcessVmRead ProcessAccessRight = 0x10 + ProcessVmWrite ProcessAccessRight = 0x20 + ProcessSynchronize ProcessAccessRight = 0x100000 +) + +type ServiceStatusProcess struct { + DwServiceType uint32 + DwCurrentState uint32 + DwControlsAccepted uint32 + DwWin32ExitCode uint32 + DwServiceSpecificExitCode uint32 + DwCheckPoint uint32 + DwWaitHint uint32 + DwProcessId uint32 + DwServiceFlags uint32 +} + +type EnumServiceStatusProcess struct { + LpServiceName *int8 + LpDisplayName *int8 + ServiceStatusProcess ServiceStatusProcess + Pad_cgo_0 [4]byte +} + +type QueryServiceConfig struct { + DwServiceType uint32 + DwStartType uint32 + DwErrorControl uint32 + Pad_cgo_0 [4]byte + LpBinaryPathName *int8 + LpLoadOrderGroup *int8 + DwTagId uint32 + Pad_cgo_1 [4]byte + LpDependencies *int8 + LpServiceStartName *int8 + LpDisplayName *int8 +} diff --git a/metricbeat/module/windows/service/doc.go b/metricbeat/module/windows/service/doc.go index a766843e5042..116ac7c58efa 100644 --- a/metricbeat/module/windows/service/doc.go +++ b/metricbeat/module/windows/service/doc.go @@ -20,5 +20,7 @@ package service //go:generate go run ../../../helper/windows/run.go -cmd "go tool cgo -godefs defs_service_windows.go" -goarch amd64 -output 
defs_service_windows_amd64.go //go:generate go run ../../../helper/windows/run.go -cmd "go tool cgo -godefs defs_service_windows.go" -goarch 386 -output defs_service_windows_386.go +//go:generate go run ../../../helper/windows/run.go -cmd "go tool cgo -godefs defs_service_windows.go" -goarch arm64 -output defs_service_windows_arm64.go +//go:generate go run ../../../helper/windows/run.go -cmd "go tool cgo -godefs defs_service_windows.go" -goarch arm -output defs_service_windows_arm.go //go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zservice_windows.go service_windows.go -//go:generate goimports -w defs_service_windows_amd64.go defs_service_windows_386.go +//go:generate goimports -w defs_service_windows_amd64.go defs_service_windows_386.go defs_service_windows_arm64.go defs_service_windows_arm.go From 9fde7b0078774c4c7df1c87cbc3b4a7b6b463e66 Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Mon, 14 Oct 2024 06:40:42 +1030 Subject: [PATCH 28/90] x-pack/filebeat/input/internal/private: add field redaction package (#40997) This package supports zeroing arbitrary fields based on a set of redaction paths or field sibling marks. 
--- .github/CODEOWNERS | 1 + CHANGELOG-developer.next.asciidoc | 1 + .../input/internal/private/private.go | 268 +++++++++++ .../input/internal/private/private_test.go | 436 ++++++++++++++++++ 4 files changed, 706 insertions(+) create mode 100644 x-pack/filebeat/input/internal/private/private.go create mode 100644 x-pack/filebeat/input/internal/private/private_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5eed05448d48..f04bf64fae47 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -123,6 +123,7 @@ CHANGELOG* /x-pack/filebeat/input/httpjson/ @elastic/security-service-integrations /x-pack/filebeat/input/internal/httplog @elastic/security-service-integrations /x-pack/filebeat/input/internal/httpmon @elastic/security-service-integrations +/x-pack/filebeat/input/internal/private @elastic/security-service-integrations /x-pack/filebeat/input/lumberjack/ @elastic/security-service-integrations /x-pack/filebeat/input/netflow/ @elastic/sec-deployment-and-devices /x-pack/filebeat/input/o365audit/ @elastic/security-service-integrations diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 92d93f88b98a..610078d225ea 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -207,6 +207,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Added filebeat debug histograms for s3 object size and events per processed s3 object. {pull}40775[40775] - Simplified GCS input state checkpoint calculation logic. {issue}40878[40878] {pull}40937[40937] - Simplified Azure Blob Storage input state checkpoint calculation logic. {issue}40674[40674] {pull}40936[40936] +- Add field redaction package. 
{pull}40997[40997] ==== Deprecated diff --git a/x-pack/filebeat/input/internal/private/private.go b/x-pack/filebeat/input/internal/private/private.go new file mode 100644 index 000000000000..e47b6521e477 --- /dev/null +++ b/x-pack/filebeat/input/internal/private/private.go @@ -0,0 +1,268 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Package private implements field redaction in maps and structs. +package private + +import ( + "fmt" + "reflect" + "slices" + "strings" + "unsafe" +) + +const tooDeep = 100 + +var privateKey = reflect.ValueOf("private") + +// Redact returns a copy of val with any fields or map elements that have been +// marked as private removed. Fields can be marked as private by including a +// sibling string- or []string-valued field or element with the name of the +// private field. The names of fields are interpreted through the tag parameter +// if present. For example if tag is "json", the `json:""` name would be +// used, falling back to the field name if not present. The tag parameter is +// ignored for map values. +// +// The global parameter indicates a set of dot-separated paths to redact. Paths +// originate at the root of val. If global is used, the resultin redaction is on +// the union of the fields redacted with tags and the fields redacted with the +// global paths. +// +// If a field has a `private:...` tag, its tag value will also be used to +// determine the list of private fields. If the private tag is empty, +// `private:""`, the fields with the tag will be marked as private. Otherwise +// the comma-separated list of names with be used. The list may refer to its +// own field. 
+func Redact[T any](val T, tag string, global []string) (redacted T, err error) { + defer func() { + switch r := recover().(type) { + case nil: + return + case cycle: + // Make the returned type informative in all cases. + // If Redact[any](v) is called and we use the zero + // value, we would return a nil any, which is less + // informative. + redacted = reflect.New(reflect.TypeOf(val)).Elem().Interface().(T) + err = r + default: + panic(r) + } + }() + rv := reflect.ValueOf(val) + switch rv.Kind() { + case reflect.Map, reflect.Pointer, reflect.Struct: + return redact(rv, tag, slices.Clone(global), 0, make(map[any]int)).Interface().(T), nil + default: + return val, nil + } +} + +func redact(v reflect.Value, tag string, global []string, depth int, seen map[any]int) reflect.Value { + switch v.Kind() { + case reflect.Pointer: + if v.IsNil() { + return v + } + if depth > tooDeep { + ident := v.Interface() + if last, ok := seen[ident]; ok && last < depth { + panic(cycle{v.Type()}) + } + seen[ident] = depth + defer delete(seen, ident) + } + return redact(v.Elem(), tag, global, depth+1, seen).Addr() + case reflect.Interface: + if v.IsNil() { + return v + } + return redact(v.Elem(), tag, global, depth+1, seen) + case reflect.Array: + if v.Len() == 0 { + return v + } + r := reflect.New(v.Type()).Elem() + for i := 0; i < v.Len(); i++ { + r.Index(i).Set(redact(v.Index(i), tag, global, depth+1, seen)) + } + return r + case reflect.Slice: + if v.Len() == 0 { + return v + } + if depth > tooDeep { + ident := struct { + data unsafe.Pointer + len int + }{ + v.UnsafePointer(), + v.Len(), + } + if last, ok := seen[ident]; ok && last < depth { + panic(cycle{v.Type()}) + } + seen[ident] = depth + defer delete(seen, ident) + } + r := reflect.MakeSlice(v.Type(), v.Len(), v.Cap()) + for i := 0; i < v.Len(); i++ { + r.Index(i).Set(redact(v.Index(i), tag, global, depth+1, seen)) + } + return r + case reflect.Map: + if v.IsNil() { + return v + } + if depth > tooDeep { + ident := 
v.UnsafePointer() + if last, ok := seen[ident]; ok && last < depth { + panic(cycle{v.Type()}) + } + seen[ident] = depth + defer delete(seen, ident) + } + private := nextStep(global) + if privateKey.CanConvert(v.Type().Key()) { + p := v.MapIndex(privateKey.Convert(v.Type().Key())) + if p.IsValid() && p.CanInterface() { + switch p := p.Interface().(type) { + case string: + private = append(private, p) + case []string: + private = append(private, p...) + case []any: + for _, s := range p { + private = append(private, fmt.Sprint(s)) + } + } + } + } + r := reflect.MakeMap(v.Type()) + it := v.MapRange() + for it.Next() { + name := it.Key().String() + if slices.Contains(private, name) { + continue + } + r.SetMapIndex(it.Key(), redact(it.Value(), tag, nextPath(name, global), depth+1, seen)) + } + return r + case reflect.Struct: + private := nextStep(global) + rt := v.Type() + names := make([]string, rt.NumField()) + for i := range names { + f := rt.Field(i) + + // Look for `private:` tags. + p, ok := f.Tag.Lookup("private") + if ok { + if p != "" { + private = append(private, strings.Split(p, ",")...) + } else { + if tag == "" { + names[i] = f.Name + private = append(private, f.Name) + } else { + p = f.Tag.Get(tag) + if p != "" { + name, _, _ := strings.Cut(p, ",") + names[i] = name + private = append(private, name) + } + } + } + } + + // Look after Private fields if we are not using a tag. + if tag == "" { + names[i] = f.Name + if f.Name == "Private" { + switch p := v.Field(i).Interface().(type) { + case string: + private = append(private, p) + case []string: + private = append(private, p...) + } + } + continue + } + + // If we are using a tag, look for `tag:""` + // falling back to fields named Private if no tag is + // present. 
+ p = f.Tag.Get(tag) + var name string + if p == "" { + name = f.Name + } else { + name, _, _ = strings.Cut(p, ",") + } + names[i] = name + if name == "private" { + switch p := v.Field(i).Interface().(type) { + case string: + private = append(private, p) + case []string: + private = append(private, p...) + } + } + } + + r := reflect.New(v.Type()).Elem() + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + if f.IsZero() || !rt.Field(i).IsExported() { + continue + } + if slices.Contains(private, names[i]) { + continue + } + if r.Field(i).CanSet() { + r.Field(i).Set(redact(f, tag, nextPath(names[i], global), depth+1, seen)) + } + } + return r + } + return v +} + +func nextStep(global []string) (private []string) { + if len(global) == 0 { + return nil + } + private = make([]string, 0, len(global)) + for _, s := range global { + key, _, more := strings.Cut(s, ".") + if !more { + private = append(private, key) + } + } + return private +} + +func nextPath(step string, global []string) []string { + if len(global) == 0 { + return nil + } + step += "." + next := make([]string, 0, len(global)) + for _, s := range global { + if !strings.HasPrefix(s, step) { + continue + } + next = append(next, s[len(step):]) + } + return next +} + +type cycle struct { + typ reflect.Type +} + +func (e cycle) Error() string { + return fmt.Sprintf("cycle including %s", e.typ) +} diff --git a/x-pack/filebeat/input/internal/private/private_test.go b/x-pack/filebeat/input/internal/private/private_test.go new file mode 100644 index 000000000000..774e35f3d532 --- /dev/null +++ b/x-pack/filebeat/input/internal/private/private_test.go @@ -0,0 +1,436 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package private + +import ( + "bytes" + "encoding/json" + "net/url" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" +) + +type redactTest struct { + name string + in any + tag string + global []string + want any + wantErr error +} + +var redactTests = []redactTest{ + { + name: "map_string", + in: map[string]any{ + "private": "secret", + "secret": "1", + "not_secret": "2", + }, + want: map[string]any{ + "private": "secret", + "not_secret": "2", + }, + }, + { + name: "map_string_inner", + in: map[string]any{ + "inner": map[string]any{ + "private": "secret", + "secret": "1", + "not_secret": "2", + }}, + want: map[string]any{ + "inner": map[string]any{ + "private": "secret", + "not_secret": "2", + }}, + }, + { + name: "map_string_inner_global", + in: map[string]any{ + "inner": map[string]any{ + "secret": "1", + "not_secret": "2", + }}, + global: []string{"inner.secret"}, + want: map[string]any{ + "inner": map[string]any{ + "not_secret": "2", + }}, + }, + { + name: "map_string_inner_next_inner_global", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "secret": "1", + "not_secret": "2", + }, + }}, + global: []string{"inner.next_inner.secret"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "not_secret": "2", + }, + }}, + }, + { + name: "map_string_inner_next_inner_params_global", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "headers": url.Values{ + "secret": []string{"1"}, + "not_secret": []string{"2"}, + }, + "not_secret": "2", + }, + }}, + global: []string{"inner.next_inner.headers.secret"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "headers": url.Values{ + "not_secret": []string{"2"}, + }, + "not_secret": "2", + }, + }}, + }, + { + name: "map_string_inner_next_inner_params_global_internal", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "headers": url.Values{ + 
"secret": []string{"1"}, + "not_secret": []string{"2"}, + }, + "not_secret": "2", + }, + }}, + global: []string{"inner.next_inner.headers"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "not_secret": "2", + }, + }}, + }, + { + name: "map_string_inner_next_inner_params_global_internal_slice", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": []map[string]any{ + { + "headers": url.Values{ + "secret": []string{"1"}, + "not_secret": []string{"2"}, + }, + "not_secret": "2", + }, + { + "headers": url.Values{ + "secret": []string{"3"}, + "not_secret": []string{"4"}, + }, + "not_secret": "4", + }, + }, + }}, + global: []string{"inner.next_inner.headers"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": []map[string]any{ + {"not_secret": "2"}, + {"not_secret": "4"}, + }, + }}, + }, + { + name: "map_string_inner_next_inner_params_global_internal_slice_precise", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": []map[string]any{ + { + "headers": url.Values{ + "secret": []string{"1"}, + "not_secret": []string{"2"}, + }, + "not_secret": "2", + }, + { + "headers": url.Values{ + "secret": []string{"3"}, + "not_secret": []string{"4"}, + }, + "not_secret": "4", + }, + }, + }}, + global: []string{"inner.next_inner.headers.secret"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": []map[string]any{ + { + "headers": url.Values{ + "not_secret": []string{"2"}, + }, + "not_secret": "2", + }, + { + "headers": url.Values{ + "not_secret": []string{"4"}, + }, + "not_secret": "4", + }, + }, + }}, + }, + { + name: "map_slice", + in: map[string]any{ + "private": []string{"secret"}, + "secret": "1", + "not_secret": "2", + }, + want: map[string]any{ + "private": []string{"secret"}, + "not_secret": "2", + }, + }, + { + name: "map_cycle", + in: func() any { + m := map[string]any{ + "private": "secret", + "secret": "1", + "not_secret": "2", + } + m["loop"] = m + return m + }(), + want: 
map[string]any(nil), + wantErr: cycle{reflect.TypeOf(map[string]any(nil))}, + }, + func() redactTest { + type s struct { + Private string + Secret string + NotSecret string + } + return redactTest{ + name: "struct_string", + in: s{ + Private: "Secret", + Secret: "1", + NotSecret: "2", + }, + tag: "", + want: s{ + Private: "Secret", + NotSecret: "2", + }, + } + }(), + func() redactTest { + type s struct { + Private []string + Secret string + NotSecret string + } + return redactTest{ + name: "struct_slice", + in: s{ + Private: []string{"Secret"}, + Secret: "1", + NotSecret: "2", + }, + tag: "", + want: s{ + Private: []string{"Secret"}, + NotSecret: "2", + }, + } + }(), + func() redactTest { + type s struct { + Private string + Secret string + NotSecret string + Loop *s + } + v := s{ + Private: "Secret", + Secret: "1", + NotSecret: "2", + } + v.Loop = &v + return redactTest{ + name: "struct_loop", + in: v, + tag: "", + want: s{}, + wantErr: cycle{reflect.TypeOf(&s{})}, + } + }(), + func() redactTest { + type s struct { + Private string `json:"private"` + Secret string `json:"secret"` + NotSecret string `json:"not_secret"` + } + return redactTest{ + name: "struct_string_json", + in: s{ + Private: "secret", + Secret: "1", + NotSecret: "2", + }, + tag: "json", + want: s{ + Private: "secret", + NotSecret: "2", + }, + } + }(), + func() redactTest { + type s struct { + Private struct{} `private:"secret"` + Secret string `json:"secret"` + NotSecret string `json:"not_secret"` + } + return redactTest{ + name: "struct_string_on_tag_json", + in: s{ + Secret: "1", + NotSecret: "2", + }, + tag: "json", + want: s{ + NotSecret: "2", + }, + } + }(), + func() redactTest { + type s struct { + Private struct{} `private:"secret1,secret2"` + Secret1 string `json:"secret1"` + Secret2 string `json:"secret2"` + NotSecret string `json:"not_secret"` + } + return redactTest{ + name: "struct_string_list_on_tag_json", + in: s{ + Secret1: "1", + Secret2: "1", + NotSecret: "2", + }, + tag: "json", 
+ want: s{ + NotSecret: "2", + }, + } + }(), + func() redactTest { + type s struct { + Private string `json:"private"` + Secret string + NotSecret string `json:"not_secret"` + } + return redactTest{ + name: "struct_string_json_missing_tag", + in: s{ + Private: "Secret", + Secret: "1", + NotSecret: "2", + }, + tag: "json", + want: s{ + Private: "Secret", + NotSecret: "2", + }, + } + }(), + func() redactTest { + type s struct { + Private []string `json:"private"` + Secret string `json:"secret"` + NotSecret string `json:"not_secret"` + } + return redactTest{ + name: "struct_slice_json", + in: s{ + Private: []string{"secret"}, + Secret: "1", + NotSecret: "2", + }, + tag: "json", + want: s{ + Private: []string{"secret"}, + NotSecret: "2", + }, + } + }(), + func() redactTest { + type s struct { + Private string `json:"private"` + Secret string `json:"secret"` + NotSecret string `json:"not_secret"` + Loop *s `json:"loop"` + } + v := s{ + Private: "secret", + Secret: "1", + NotSecret: "2", + } + v.Loop = &v + return redactTest{ + name: "struct_loop_json", + in: v, + tag: "json", + want: s{}, + wantErr: cycle{reflect.TypeOf(&s{})}, + } + }(), +} + +func TestRedact(t *testing.T) { + allow := cmp.AllowUnexported() + + for _, test := range redactTests { + t.Run(test.name, func(t *testing.T) { + var before []byte + _, isCycle := test.wantErr.(cycle) + if !isCycle { + var err error + before, err = json.Marshal(test.in) + if err != nil { + t.Fatalf("failed to get before state: %v", err) + } + } + got, err := Redact(test.in, test.tag, test.global) + if err != test.wantErr { + t.Fatalf("unexpected error from Redact: %v", err) + } + if !isCycle { + after, err := json.Marshal(test.in) + if err != nil { + t.Fatalf("failed to get after state: %v", err) + } + if !bytes.Equal(before, after) { + t.Errorf("unexpected change in input:\n---:\n+++:\n%s", cmp.Diff(before, after)) + } + } + if !cmp.Equal(test.want, got, allow) { + t.Errorf("unexpected paths:\n--- want:\n+++ got:\n%s", 
cmp.Diff(test.want, got, allow)) + } + }) + } +} From ee780d252892469931b887643c006714dff08b22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20=C5=9Awi=C4=85tek?= Date: Mon, 14 Oct 2024 12:50:54 +0200 Subject: [PATCH 29/90] Only watch metadata for ReplicaSets in K8s (#41100) * Bump github.com/elastic/elastic-agent-autodiscover to v0.9.0 * Only watch metadata for ReplicaSets in k8s autodiscovery * Only watch metadata for ReplicaSets in add_kubernetes_metadata processor * Fix linter warnings * Merge changelog entries --- CHANGELOG.next.asciidoc | 2 +- NOTICE.txt | 4 +- go.mod | 2 +- go.sum | 4 +- .../autodiscover/providers/kubernetes/pod.go | 71 ++++++++++++------- .../providers/kubernetes/pod_test.go | 13 +++- .../add_kubernetes_metadata/kubernetes.go | 30 +++++--- 7 files changed, 85 insertions(+), 41 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index d3022fce790f..2118be15ec8c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -236,7 +236,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - The script processor has a new configuration option that only uses the cached javascript sessions and prevents the creation of new javascript sessions. - Update to Go 1.22.7. 
{pull}41018[41018] - Replace Ubuntu 20.04 with 24.04 for Docker base images {issue}40743[40743] {pull}40942[40942] - +- Reduce memory consumption of k8s autodiscovery and the add_kubernetes_metadata processor when Deployment metadata is enabled *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 2ea1ac2107c0..bb5807f9a419 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12575,11 +12575,11 @@ various licenses: -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover -Version: v0.8.2 +Version: v0.9.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.8.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.9.0/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 03ea83236240..c643f16b1fa4 100644 --- a/go.mod +++ b/go.mod @@ -188,7 +188,7 @@ require ( github.com/dgraph-io/badger/v4 v4.2.1-0.20240828131336-2725dc8ed5c2 github.com/elastic/bayeux v1.0.5 github.com/elastic/ebpfevents v0.6.0 - github.com/elastic/elastic-agent-autodiscover v0.8.2 + github.com/elastic/elastic-agent-autodiscover v0.9.0 github.com/elastic/elastic-agent-libs v0.12.1 github.com/elastic/elastic-agent-system-metrics v0.11.1 github.com/elastic/go-elasticsearch/v8 v8.14.0 diff --git a/go.sum b/go.sum index 63a740c46dd7..4f561fa3d6ec 100644 --- a/go.sum +++ b/go.sum @@ -352,8 +352,8 @@ github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqr github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY= github.com/elastic/ebpfevents v0.6.0 h1:BrL3m7JFK7U6h2jkbk3xAWWs//IZnugCHEDds5u2v68= github.com/elastic/ebpfevents v0.6.0/go.mod h1:ESG9gw7N+n5yCCMgdg1IIJENKWSmX7+X0Fi9GUs9nvU= 
-github.com/elastic/elastic-agent-autodiscover v0.8.2 h1:Fs2FhR33AMBPfm5/jz4drVzaEZaqOIHlDBvGtkUZdIk= -github.com/elastic/elastic-agent-autodiscover v0.8.2/go.mod h1:VZnU53EVaFTxR8Xf6YsLN8FHD5DKQzHSPlKax9/4w+o= +github.com/elastic/elastic-agent-autodiscover v0.9.0 h1:+iWIKh0u3e8I+CJa3FfWe9h0JojNasPgYIA47gpuuns= +github.com/elastic/elastic-agent-autodiscover v0.9.0/go.mod h1:5iUxLHhVdaGSWYTveSwfJEY4RqPXTG13LPiFoxcpFd4= github.com/elastic/elastic-agent-client/v7 v7.15.0 h1:nDB7v8TBoNuD6IIzC3z7Q0y+7bMgXoT2DsHfolO2CHE= github.com/elastic/elastic-agent-client/v7 v7.15.0/go.mod h1:6h+f9QdIr3GO2ODC0Y8+aEXRwzbA5W4eV4dd/67z7nI= github.com/elastic/elastic-agent-libs v0.12.1 h1:5jkxMx15Bna8cq7/Sz/XUIVUXfNWiJ80iSk4ICQ7KJ0= diff --git a/libbeat/autodiscover/providers/kubernetes/pod.go b/libbeat/autodiscover/providers/kubernetes/pod.go index be7179873ecc..da018c6f6c24 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod.go +++ b/libbeat/autodiscover/providers/kubernetes/pod.go @@ -24,6 +24,8 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/gofrs/uuid/v5" k8s "k8s.io/client-go/kubernetes" @@ -135,11 +137,23 @@ func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish fu // Deployment -> Replicaset -> Pod // CronJob -> job -> Pod if metaConf.Deployment { - replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - Namespace: config.Namespace, - HonorReSyncs: true, - }, nil) + metadataClient, err := kubernetes.GetKubernetesMetadataClient(config.KubeConfig, config.KubeClientOptions) + if err != nil { + logger.Errorf("Error creating metadata client due to error %+v", err) + } + replicaSetWatcher, err = kubernetes.NewNamedMetadataWatcher( + "resource_metadata_enricher_rs", + client, + metadataClient, + schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}, + 
kubernetes.WatchOptions{ + SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, + HonorReSyncs: true, + }, + nil, + metadata.RemoveUnnecessaryReplicaSetData, + ) if err != nil { logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err) } @@ -225,23 +239,26 @@ func (p *pod) GenerateHints(event bus.Event) bus.Event { var kubeMeta, container mapstr.M annotations := make(mapstr.M, 0) - rawMeta, ok := event["kubernetes"] - if ok { - kubeMeta = rawMeta.(mapstr.M) - // The builder base config can configure any of the field values of kubernetes if need be. - e["kubernetes"] = kubeMeta - if rawAnn, ok := kubeMeta["annotations"]; ok { - anns, _ := rawAnn.(mapstr.M) - if len(anns) != 0 { - annotations = anns.Clone() + rawMeta, found := event["kubernetes"] + if found { + kubeMetaMap, ok := rawMeta.(mapstr.M) + if ok { + kubeMeta = kubeMetaMap + // The builder base config can configure any of the field values of kubernetes if need be. + e["kubernetes"] = kubeMeta + if rawAnn, ok := kubeMeta["annotations"]; ok { + anns, _ := rawAnn.(mapstr.M) + if len(anns) != 0 { + annotations = anns.Clone() + } } - } - // Look at all the namespace level default annotations and do a merge with priority going to the pod annotations. - if rawNsAnn, ok := kubeMeta["namespace_annotations"]; ok { - namespaceAnnotations, _ := rawNsAnn.(mapstr.M) - if len(namespaceAnnotations) != 0 { - annotations.DeepUpdateNoOverwrite(namespaceAnnotations) + // Look at all the namespace level default annotations and do a merge with priority going to the pod annotations. 
+ if rawNsAnn, ok := kubeMeta["namespace_annotations"]; ok { + namespaceAnnotations, _ := rawNsAnn.(mapstr.M) + if len(namespaceAnnotations) != 0 { + annotations.DeepUpdateNoOverwrite(namespaceAnnotations) + } } } } @@ -255,12 +272,14 @@ func (p *pod) GenerateHints(event bus.Event) bus.Event { e["ports"] = ports } - if rawCont, ok := kubeMeta["container"]; ok { - container = rawCont.(mapstr.M) - // This would end up adding a runtime entry into the event. This would make sure - // that there is not an attempt to spin up a docker input for a rkt container and when a - // rkt input exists it would be natively supported. - e["container"] = container + if rawCont, found := kubeMeta["container"]; found { + if containerMap, ok := rawCont.(mapstr.M); ok { + container = containerMap + // This would end up adding a runtime entry into the event. This would make sure + // that there is not an attempt to spin up a docker input for a rkt container and when a + // rkt input exists it would be natively supported. 
+ e["container"] = container + } } cname := utils.GetContainerName(container) diff --git a/libbeat/autodiscover/providers/kubernetes/pod_test.go b/libbeat/autodiscover/providers/kubernetes/pod_test.go index bb8731275b36..a9e2179cea17 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod_test.go +++ b/libbeat/autodiscover/providers/kubernetes/pod_test.go @@ -44,16 +44,19 @@ import ( func TestGenerateHints(t *testing.T) { tests := []struct { + name string event bus.Event result bus.Event }{ // Empty events should return empty hints { + name: "empty", event: bus.Event{}, result: bus.Event{}, }, // Only kubernetes payload must return only kubernetes as part of the hint { + name: "only kubernetes", event: bus.Event{ "kubernetes": mapstr.M{ "pod": mapstr.M{ @@ -71,6 +74,7 @@ func TestGenerateHints(t *testing.T) { }, // Kubernetes payload with container info must be bubbled to top level { + name: "kubernetes container info top level", event: bus.Event{ "kubernetes": mapstr.M{ "container": mapstr.M{ @@ -102,6 +106,7 @@ func TestGenerateHints(t *testing.T) { // not.to.include must not be part of hints // period is annotated at both container and pod level. Container level value must be in hints { + name: "multiple hints", event: bus.Event{ "kubernetes": mapstr.M{ "annotations": getNestedAnnotations(mapstr.M{ @@ -163,6 +168,7 @@ func TestGenerateHints(t *testing.T) { // Have one set of hints come from the pod and the other come from namespaces // The resultant hints should have a combination of both { + name: "hints from Pod and Namespace", event: bus.Event{ "kubernetes": mapstr.M{ "annotations": getNestedAnnotations(mapstr.M{ @@ -227,6 +233,7 @@ func TestGenerateHints(t *testing.T) { // Have one set of hints come from the pod and the same keys come from namespaces // The resultant hints should honor only pods and not namespace. 
{ + name: "pod hints win over namespace", event: bus.Event{ "kubernetes": mapstr.M{ "annotations": getNestedAnnotations(mapstr.M{ @@ -288,6 +295,7 @@ func TestGenerateHints(t *testing.T) { // Have no hints on the pod and have namespace level defaults. // The resultant hints should honor only namespace defaults. { + name: "namespace defaults", event: bus.Event{ "kubernetes": mapstr.M{ "namespace_annotations": getNestedAnnotations(mapstr.M{ @@ -339,7 +347,10 @@ func TestGenerateHints(t *testing.T) { logger: logp.NewLogger("kubernetes.pod"), } for _, test := range tests { - assert.Equal(t, p.GenerateHints(test.event), test.result) + test := test + t.Run(test.name, func(t *testing.T) { + assert.Equal(t, test.result, p.GenerateHints(test.event)) + }) } } diff --git a/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/libbeat/processors/add_kubernetes_metadata/kubernetes.go index c22875ccf3c8..7bb1ddd0905d 100644 --- a/libbeat/processors/add_kubernetes_metadata/kubernetes.go +++ b/libbeat/processors/add_kubernetes_metadata/kubernetes.go @@ -25,6 +25,8 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/runtime/schema" + k8sclient "k8s.io/client-go/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes" @@ -235,11 +237,23 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) { // Deployment -> Replicaset -> Pod // CronJob -> job -> Pod if metaConf.Deployment { - replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - Namespace: config.Namespace, - HonorReSyncs: true, - }, nil) + metadataClient, err := kubernetes.GetKubernetesMetadataClient(config.KubeConfig, config.KubeClientOptions) + if err != nil { + k.log.Errorf("Error creating metadata client due to error %+v", err) + } + replicaSetWatcher, err = kubernetes.NewNamedMetadataWatcher( + "resource_metadata_enricher_rs", + client, + metadataClient, + 
schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}, + kubernetes.WatchOptions{ + SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, + HonorReSyncs: true, + }, + nil, + metadata.RemoveUnnecessaryReplicaSetData, + ) if err != nil { k.log.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err) } @@ -268,15 +282,15 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) { watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - pod := obj.(*kubernetes.Pod) + pod, _ := obj.(*kubernetes.Pod) k.addPod(pod) }, UpdateFunc: func(obj interface{}) { - pod := obj.(*kubernetes.Pod) + pod, _ := obj.(*kubernetes.Pod) k.updatePod(pod) }, DeleteFunc: func(obj interface{}) { - pod := obj.(*kubernetes.Pod) + pod, _ := obj.(*kubernetes.Pod) k.removePod(pod) }, }) From cfd1f1cd26450e2c77d3f73f0656caa76f183ff1 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Mon, 14 Oct 2024 12:24:00 -0400 Subject: [PATCH 30/90] Use journald for system module on Debian 12 (#41061) This commit adds Debian 12 support to our system module, to support Debian 12 we need to use the journald input to collect the system logs. To support it, a new, internal, input `system-logs`is introduced, it is responsible for deciding whether the log input or journald must be used. If `var.paths` is defined in the module configuration, `system-logs` looks at the files, if any of the globs resolves to one or more files the `log` input is used, otherwise the `jouranld` input is used. This behaviour can be overridden by setting `var.use_journald` or `var.use_files`, which will force the use of journald or files. 
Other changes: - Journald input now support filtering by facilities - System tests for modules now support handling journal files - The `TESTING_FILEBEAT_FILEPATTERN` environment variable now is a comma separated list of globs, it defaults to `.log,*.journal` - Multiple lint warnings are fixed - The documentation has been updated where needed. --- CHANGELOG.next.asciidoc | 3 + filebeat/docs/include/use-journald.asciidoc | 12 + filebeat/docs/include/var-paths.asciidoc | 2 +- filebeat/docs/inputs/input-journald.asciidoc | 7 + filebeat/docs/modules/system.asciidoc | 6 +- filebeat/filebeat.reference.yml | 30 +- filebeat/fileset/fileset.go | 31 +- filebeat/include/list.go | 1 + filebeat/input/default-inputs/inputs_linux.go | 2 + filebeat/input/journald/README.md | 57 +++ filebeat/input/journald/config.go | 3 + filebeat/input/journald/input.go | 8 +- .../input/journald/pkg/journalctl/reader.go | 5 + .../journald/pkg/journalctl/reader_test.go | 2 +- filebeat/input/systemlogs/input.go | 234 +++++++++++ filebeat/input/systemlogs/input_linux.go | 49 +++ filebeat/input/systemlogs/input_other.go | 31 ++ filebeat/module/system/README.md | 14 + .../module/system/_meta/config.reference.yml | 30 +- filebeat/module/system/_meta/config.yml | 27 ++ filebeat/module/system/_meta/docs.asciidoc | 6 +- filebeat/module/system/auth/config/auth.yml | 36 +- .../module/system/auth/ingest/entrypoint.yml | 15 + .../auth/ingest/{pipeline.yml => files.yml} | 13 +- .../system/auth/ingest/grok-auth-messages.yml | 14 + .../module/system/auth/ingest/journald.yml | 201 +++++++++ filebeat/module/system/auth/manifest.yml | 10 +- .../test/auth-ubuntu1204.log-expected.json | 200 ++++----- .../module/system/auth/test/debian-12.export | Bin 0 -> 8838 bytes .../module/system/auth/test/debian-12.journal | Bin 0 -> 8388608 bytes .../auth/test/debian-12.journal-expected.json | 383 ++++++++++++++++++ .../auth/test/secure-rhel7.log-expected.json | 200 ++++----- .../system/auth/test/test.log-expected.json | 22 +- 
.../auth/test/timestamp.log-expected.json | 4 +- .../module/system/syslog/config/syslog.yml | 45 +- .../system/syslog/ingest/entrypoint.yml | 15 + .../syslog/ingest/{pipeline.yml => files.yml} | 0 .../module/system/syslog/ingest/journald.yml | 29 ++ filebeat/module/system/syslog/manifest.yml | 10 +- .../darwin-syslog-sample.log-expected.json | 6 +- .../test/darwin-syslog.log-expected.json | 200 ++++----- .../system/syslog/test/debian-12.export | Bin 0 -> 2133 bytes .../system/syslog/test/debian-12.journal | Bin 0 -> 8388608 bytes .../test/debian-12.journal-expected.json | 63 +++ .../syslog/test/suse-syslog.log-expected.json | 4 +- .../syslog/test/tz-offset.log-expected.json | 6 +- filebeat/modules.d/system.yml.disabled | 27 ++ .../system/config/filebeat_modules.yml.j2 | 2 + filebeat/tests/system/test_modules.py | 51 ++- libbeat/tests/system/beat/beat.py | 2 +- x-pack/filebeat/filebeat.reference.yml | 30 +- 51 files changed, 1761 insertions(+), 387 deletions(-) create mode 100644 filebeat/docs/include/use-journald.asciidoc create mode 100644 filebeat/input/journald/README.md create mode 100644 filebeat/input/systemlogs/input.go create mode 100644 filebeat/input/systemlogs/input_linux.go create mode 100644 filebeat/input/systemlogs/input_other.go create mode 100644 filebeat/module/system/README.md create mode 100644 filebeat/module/system/auth/ingest/entrypoint.yml rename filebeat/module/system/auth/ingest/{pipeline.yml => files.yml} (88%) create mode 100644 filebeat/module/system/auth/ingest/grok-auth-messages.yml create mode 100644 filebeat/module/system/auth/ingest/journald.yml create mode 100644 filebeat/module/system/auth/test/debian-12.export create mode 100644 filebeat/module/system/auth/test/debian-12.journal create mode 100644 filebeat/module/system/auth/test/debian-12.journal-expected.json create mode 100644 filebeat/module/system/syslog/ingest/entrypoint.yml rename filebeat/module/system/syslog/ingest/{pipeline.yml => files.yml} (100%) create mode 100644 
filebeat/module/system/syslog/ingest/journald.yml create mode 100644 filebeat/module/system/syslog/test/debian-12.export create mode 100644 filebeat/module/system/syslog/test/debian-12.journal create mode 100644 filebeat/module/system/syslog/test/debian-12.journal-expected.json diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2118be15ec8c..ebd20cb190cb 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -46,6 +46,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Added `container.image.name` to `journald` Filebeat input's Docker-specific translated fields. {pull}40450[40450] - Change log.file.path field in awscloudwatch input to nested object. {pull}41099[41099] - Remove deprecated awscloudwatch field from Filebeat. {pull}41089[41089] +- System module events now contain `input.type: systemlogs` instead of `input.type: log` when harvesting log files. {pull}41061[41061] *Heartbeat* @@ -324,6 +325,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Improved GCS input documentation. {pull}41143[41143] - Add CSV decoding capacity to azureblobstorage input {pull}40978[40978] - Add CSV decoding capacity to gcs input {pull}40979[40979] +- Journald input now supports filtering by facilities {pull}41061[41061] +- System module now supports reading from journald. {pull}41061[41061] *Auditbeat* diff --git a/filebeat/docs/include/use-journald.asciidoc b/filebeat/docs/include/use-journald.asciidoc new file mode 100644 index 000000000000..12cb33c0c6ce --- /dev/null +++ b/filebeat/docs/include/use-journald.asciidoc @@ -0,0 +1,12 @@ +*`var.use_journald`*:: + +A boolean that when set to `true` will read logs from Journald. When +Journald is used all events contain the tag `journald`. + +*`var.use_files`*:: + +A boolean that when set to `true` will read logs from the log files +defined by `var.paths`. 
+ +If neither `var.use_journald` nor `var.use_files` are set (or both are +`false`) {beatname_uc} will auto-detect the source for the logs. diff --git a/filebeat/docs/include/var-paths.asciidoc b/filebeat/docs/include/var-paths.asciidoc index dae14a9e22bd..ea6f7d1c6ae0 100644 --- a/filebeat/docs/include/var-paths.asciidoc +++ b/filebeat/docs/include/var-paths.asciidoc @@ -6,4 +6,4 @@ are also supported here. For example, you can use wildcards to fetch all files from a predefined level of subdirectories: `/path/to/log/*/*.log`. This fetches all `.log` files from the subfolders of `/path/to/log`. It does not fetch log files from the `/path/to/log` folder itself. If this setting is left -empty, {beatname_uc} will choose log paths based on your operating system. \ No newline at end of file +empty, {beatname_uc} will choose log paths based on your operating system. diff --git a/filebeat/docs/inputs/input-journald.asciidoc b/filebeat/docs/inputs/input-journald.asciidoc index a4433e67ce04..a0402d4e5838 100644 --- a/filebeat/docs/inputs/input-journald.asciidoc +++ b/filebeat/docs/inputs/input-journald.asciidoc @@ -169,6 +169,13 @@ Valid transports: * stdout: messages from a service's standard output or error output * kernel: messages from the kernel +[float] +[id="{beatname_lc}-input-{type}-facilities"] +==== `facilities` + +Filter entries by facilities, facilities must be specified using their +numeric code. + [float] [id="{beatname_lc}-input-{type}-include-matches"] ==== `include_matches` diff --git a/filebeat/docs/modules/system.asciidoc b/filebeat/docs/modules/system.asciidoc index 1866f2d5c259..88cb1f78a1c3 100644 --- a/filebeat/docs/modules/system.asciidoc +++ b/filebeat/docs/modules/system.asciidoc @@ -23,7 +23,7 @@ include::../include/gs-link.asciidoc[] === Compatibility This module was tested with logs from OSes like Ubuntu 12.04, Centos 7, and -macOS Sierra. +macOS Sierra. For Debian 12 Journald is used to read the system logs. 
This module is not available for Windows. @@ -65,11 +65,15 @@ include::../include/config-option-intro.asciidoc[] include::../include/var-paths.asciidoc[] +include::../include/use-journald.asciidoc[] + [float] ==== `auth` fileset settings include::../include/var-paths.asciidoc[] +include::../include/use-journald.asciidoc[] + *`var.tags`*:: A list of tags to include in events. Including `forwarded` indicates that the diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 14e9f276fb49..a1af7b861d5b 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -21,7 +21,18 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Input configuration (advanced). Any input configuration option + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # Input configuration (advanced). + # Any input configuration option # can be added under this section. #input: @@ -33,6 +44,23 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # A list of tags to include in events. Including 'forwarded' + # indicates that the events did not originate on this host and + # causes host.name to not be added to events. Include + # 'preserve_orginal_event' causes the pipeline to retain the raw log + # in event.original. Defaults to []. + #var.tags: [] + # Input configuration (advanced). 
Any input configuration option # can be added under this section. #input: diff --git a/filebeat/fileset/fileset.go b/filebeat/fileset/fileset.go index 7fc91135dcc8..b4546327e492 100644 --- a/filebeat/fileset/fileset.go +++ b/filebeat/fileset/fileset.go @@ -24,7 +24,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "reflect" @@ -143,11 +142,11 @@ type ProcessorRequirement struct { func (fs *Fileset) readManifest() (*manifest, error) { cfg, err := common.LoadFile(filepath.Join(fs.modulePath, fs.name, "manifest.yml")) if err != nil { - return nil, fmt.Errorf("Error reading manifest file: %v", err) + return nil, fmt.Errorf("Error reading manifest file: %w", err) } manifest, err := newManifest(cfg) if err != nil { - return nil, fmt.Errorf("Error unpacking manifest: %v", err) + return nil, fmt.Errorf("Error unpacking manifest: %w", err) } return manifest, nil } @@ -183,7 +182,7 @@ func (fs *Fileset) evaluateVars(info beat.Info) (map[string]interface{}, error) vars[name], err = resolveVariable(vars, value) if err != nil { - return nil, fmt.Errorf("Error resolving variables on %s: %v", name, err) + return nil, fmt.Errorf("Error resolving variables on %s: %w", name, err) } } @@ -246,7 +245,7 @@ func resolveVariable(vars map[string]interface{}, value interface{}) (interface{ if ok { transf, err := ApplyTemplate(vars, s, false) if err != nil { - return nil, fmt.Errorf("array: %v", err) + return nil, fmt.Errorf("array: %w", err) } transformed = append(transformed, transf) } else { @@ -322,25 +321,27 @@ func getTemplateFunctions(vars map[string]interface{}) (template.FuncMap, error) // getBuiltinVars computes the supported built in variables and groups them // in a dictionary func (fs *Fileset) getBuiltinVars(info beat.Info) (map[string]interface{}, error) { - host, err := os.Hostname() - if err != nil || len(host) == 0 { + osHost, err := os.Hostname() + if err != nil || len(osHost) == 0 { return nil, fmt.Errorf("Error getting the hostname: %w", 
err) } - split := strings.SplitN(host, ".", 2) + split := strings.SplitN(osHost, ".", 2) hostname := split[0] domain := "" if len(split) > 1 { domain = split[1] } - return map[string]interface{}{ + vars := map[string]interface{}{ "prefix": info.IndexPrefix, "hostname": hostname, "domain": domain, "module": fs.mname, "fileset": fs.name, "beatVersion": info.Version, - }, nil + } + + return vars, nil } func (fs *Fileset) getInputConfig() (*conf.C, error) { @@ -348,7 +349,7 @@ func (fs *Fileset) getInputConfig() (*conf.C, error) { if err != nil { return nil, fmt.Errorf("Error expanding vars on the input path: %w", err) } - contents, err := ioutil.ReadFile(filepath.Join(fs.modulePath, fs.name, path)) + contents, err := os.ReadFile(filepath.Join(fs.modulePath, fs.name, path)) if err != nil { return nil, fmt.Errorf("Error reading input file %s: %w", path, err) } @@ -434,7 +435,7 @@ func (fs *Fileset) GetPipelines(esVersion version.V) (pipelines []pipeline, err return nil, fmt.Errorf("Error expanding vars on the ingest pipeline path: %w", err) } - strContents, err := ioutil.ReadFile(filepath.Join(fs.modulePath, fs.name, path)) + strContents, err := os.ReadFile(filepath.Join(fs.modulePath, fs.name, path)) if err != nil { return nil, fmt.Errorf("Error reading pipeline file %s: %w", path, err) } @@ -458,7 +459,11 @@ func (fs *Fileset) GetPipelines(esVersion version.V) (pipelines []pipeline, err if err != nil { return nil, fmt.Errorf("Failed to sanitize the YAML pipeline file: %s: %w", path, err) } - content = newContent.(map[string]interface{}) + var ok bool + content, ok = newContent.(map[string]interface{}) + if !ok { + return nil, errors.New("cannot convert newContent to map[string]interface{}") + } default: return nil, fmt.Errorf("Unsupported extension '%s' for pipeline file: %s", extension, path) } diff --git a/filebeat/include/list.go b/filebeat/include/list.go index d0c0ea511c4e..e2a656a2a856 100644 --- a/filebeat/include/list.go +++ b/filebeat/include/list.go @@ -33,6 
+33,7 @@ import ( _ "github.com/elastic/beats/v7/filebeat/input/redis" _ "github.com/elastic/beats/v7/filebeat/input/stdin" _ "github.com/elastic/beats/v7/filebeat/input/syslog" + _ "github.com/elastic/beats/v7/filebeat/input/systemlogs" _ "github.com/elastic/beats/v7/filebeat/module/apache" _ "github.com/elastic/beats/v7/filebeat/module/auditd" _ "github.com/elastic/beats/v7/filebeat/module/elasticsearch" diff --git a/filebeat/input/default-inputs/inputs_linux.go b/filebeat/input/default-inputs/inputs_linux.go index 8eed9a3ea4f5..ec37894d26ae 100644 --- a/filebeat/input/default-inputs/inputs_linux.go +++ b/filebeat/input/default-inputs/inputs_linux.go @@ -19,6 +19,7 @@ package inputs import ( "github.com/elastic/beats/v7/filebeat/input/journald" + "github.com/elastic/beats/v7/filebeat/input/systemlogs" v2 "github.com/elastic/beats/v7/filebeat/input/v2" cursor "github.com/elastic/beats/v7/filebeat/input/v2/input-cursor" "github.com/elastic/beats/v7/libbeat/beat" @@ -37,6 +38,7 @@ func osInputs(info beat.Info, log *logp.Logger, components osComponents) []v2.Pl zeroPlugin := v2.Plugin{} if journald := journald.Plugin(log, components); journald != zeroPlugin { plugins = append(plugins, journald) + plugins = append(plugins, systemlogs.PluginV2(log, components)) } return plugins diff --git a/filebeat/input/journald/README.md b/filebeat/input/journald/README.md new file mode 100644 index 000000000000..67038bd76231 --- /dev/null +++ b/filebeat/input/journald/README.md @@ -0,0 +1,57 @@ +# Journald input + +The Journald input reads journal entries by calling `journalctl`. + +## Adding entries to the journal +The easiest way to add entries to the journal is to use `systemd-cat`: +``` +root@vagrant-debian-12:~/filebeat# echo "Hello Journal!" 
| systemd-cat +root@vagrant-debian-12:~/filebeat# journalctl -n 1 +Oct 02 04:17:01 vagrant-debian-12 CRON[1912]: pam_unix(cron:session): session closed for user root +``` + +The syslog identifier can be specified with the `-t` parameter: +``` +root@vagrant-debian-12:~/filebeat# echo "Hello Journal!" | systemd-cat -t my-test +root@vagrant-debian-12:~/filebeat# journalctl -n 1 +Oct 02 04:17:50 vagrant-debian-12 my-test[1924]: Hello Journal! +``` + +## Crafting a journal file +The easiest way to craft a journal file with the entries you need is +to use +[`systemd-journald-remote`](https://www.freedesktop.org/software/systemd/man/latest/systemd-journal-remote.service.html). +First we need to export some entries to a file: +``` +root@vagrant-debian-12:~/filebeat# journalctl -g "Hello" -o export >export +``` +One good thing about the `-o export` format is that you can just concatenate the +output of any number of runs and the result will be a valid file. + +Then you can use `systemd-journald-remote` to generate the journal +file: +``` +root@vagrant-debian-12:~/filebeat# /usr/lib/systemd/systemd-journal-remote -o example.journal export +Finishing after writing 2 entries +``` +Or you can run it as a one-liner: +``` +root@vagrant-debian-12:~/filebeat# journalctl -g "Hello" -o export | /usr/lib/systemd/systemd-journal-remote -o example.journal - +``` + +Then you can read the newly created file: +``` +root@vagrant-debian-12:~/filebeat# journalctl --file ./example.journal +Oct 02 04:16:54 vagrant-debian-12 unknown[1908]: Hello Journal! +Oct 02 04:17:50 vagrant-debian-12 my-test[1924]: Hello Journal! +root@vagrant-debian-12:~/filebeat# +``` + +Bear in mind that `systemd-journal-remote` will **append** to the +output file. 
+ +## References +- https://systemd.io/JOURNAL_NATIVE_PROTOCOL/ +- https://www.freedesktop.org/software/systemd/man/latest/journalctl.html +- https://www.freedesktop.org/software/systemd/man/latest/systemd-cat.html +- https://www.freedesktop.org/software/systemd/man/latest/systemd-journal-remote.service.html diff --git a/filebeat/input/journald/config.go b/filebeat/input/journald/config.go index bdcd980e4842..d354baaacf57 100644 --- a/filebeat/input/journald/config.go +++ b/filebeat/input/journald/config.go @@ -63,6 +63,9 @@ type config struct { // SaveRemoteHostname defines if the original source of the entry needs to be saved. SaveRemoteHostname bool `config:"save_remote_hostname"` + // Facility is a list of facilities to filter journal messages + Facilities []int `config:"facilities"` + // Parsers configuration Parsers parser.Config `config:",inline"` } diff --git a/filebeat/input/journald/input.go b/filebeat/input/journald/input.go index 9ce61042791a..20e46bd0cc21 100644 --- a/filebeat/input/journald/input.go +++ b/filebeat/input/journald/input.go @@ -50,6 +50,7 @@ type journald struct { Units []string Transports []string Identifiers []string + Facilities []int SaveRemoteHostname bool Parsers parser.Config Journalctl bool @@ -79,7 +80,7 @@ func Plugin(log *logp.Logger, store cursor.StateStore) input.Plugin { Logger: log, StateStore: store, Type: pluginName, - Configure: configure, + Configure: Configure, }, } } @@ -90,7 +91,7 @@ var cursorVersion = 1 func (p pathSource) Name() string { return string(p) } -func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { +func Configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { config := defaultConfig() if err := cfg.Unpack(&config); err != nil { return nil, nil, err @@ -113,6 +114,7 @@ func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { Units: config.Units, Transports: config.Transports, Identifiers: config.Identifiers, + Facilities: config.Facilities, SaveRemoteHostname: 
config.SaveRemoteHostname, Parsers: config.Parsers, }, nil @@ -128,6 +130,7 @@ func (inp *journald) Test(src cursor.Source, ctx input.TestContext) error { inp.Identifiers, inp.Transports, inp.Matches, + inp.Facilities, journalctl.SeekHead, "", inp.Since, @@ -158,6 +161,7 @@ func (inp *journald) Run( inp.Identifiers, inp.Transports, inp.Matches, + inp.Facilities, mode, pos, inp.Since, diff --git a/filebeat/input/journald/pkg/journalctl/reader.go b/filebeat/input/journald/pkg/journalctl/reader.go index 25b90d9a490f..b530e942b237 100644 --- a/filebeat/input/journald/pkg/journalctl/reader.go +++ b/filebeat/input/journald/pkg/journalctl/reader.go @@ -137,6 +137,7 @@ func New( syslogIdentifiers []string, transports []string, matchers journalfield.IncludeMatches, + facilities []int, mode SeekMode, cursor string, since time.Duration, @@ -166,6 +167,10 @@ func New( args = append(args, fmt.Sprintf("_TRANSPORT=%s", m)) } + for _, facility := range facilities { + args = append(args, "--facility", fmt.Sprintf("%d", facility)) + } + otherArgs := handleSeekAndCursor(mode, since, cursor) jctl, err := newJctl(canceler, logger.Named("journalctl-runner"), "journalctl", append(args, otherArgs...)...) 
diff --git a/filebeat/input/journald/pkg/journalctl/reader_test.go b/filebeat/input/journald/pkg/journalctl/reader_test.go index 2cd29e83a355..af3837fd09c1 100644 --- a/filebeat/input/journald/pkg/journalctl/reader_test.go +++ b/filebeat/input/journald/pkg/journalctl/reader_test.go @@ -97,7 +97,7 @@ func TestRestartsJournalctlOnError(t *testing.T) { return &mock, nil } - reader, err := New(logp.L(), ctx, nil, nil, nil, journalfield.IncludeMatches{}, SeekHead, "", 0, "", factory) + reader, err := New(logp.L(), ctx, nil, nil, nil, journalfield.IncludeMatches{}, []int{}, SeekHead, "", 0, "", factory) if err != nil { t.Fatalf("cannot instantiate journalctl reader: %s", err) } diff --git a/filebeat/input/systemlogs/input.go b/filebeat/input/systemlogs/input.go new file mode 100644 index 000000000000..789fd65ad5d9 --- /dev/null +++ b/filebeat/input/systemlogs/input.go @@ -0,0 +1,234 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package systemlogs + +import ( + "errors" + "fmt" + "path/filepath" + + "github.com/elastic/beats/v7/filebeat/channel" + v1 "github.com/elastic/beats/v7/filebeat/input" + loginput "github.com/elastic/beats/v7/filebeat/input/log" + v2 "github.com/elastic/beats/v7/filebeat/input/v2" + cursor "github.com/elastic/beats/v7/filebeat/input/v2/input-cursor" + "github.com/elastic/beats/v7/libbeat/feature" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" +) + +const pluginName = "system-logs" + +func init() { + // Register an input V1, that's used by the log input + if err := v1.Register(pluginName, newV1Input); err != nil { + panic(err) + } +} + +type config struct { + UseJournald bool `config:"use_journald"` + UseFiles bool `config:"use_files"` + Files *conf.C `config:"files" yaml:"files"` + Journald *conf.C `config:"journald" yaml:"journald"` +} + +func (c *config) Validate() error { + if c.UseFiles && c.UseJournald { + return errors.New("'use_journald' and 'use_files' cannot both be true") + } + + if c.Files == nil && c.Journald == nil { + return errors.New("one of 'journald' or 'files' must be set") + } + + return nil +} + +// newV1Input checks whether the log input must be created and +// delegates to loginput.NewInput if needed. +func newV1Input( + cfg *conf.C, + outlet channel.Connector, + context v1.Context, +) (v1.Input, error) { + journald, err := useJournald(cfg) + if err != nil { + return nil, fmt.Errorf("cannot decide between journald and files: %w", err) + } + + if journald { + return nil, v2.ErrUnknownInput + } + + // Convert the configuration and create a log input + logCfg, err := toFilesConfig(cfg) + if err != nil { + return nil, err + } + + return loginput.NewInput(logCfg, outlet, context) +} + +// PluginV2 creates a v2.Plugin that will instantiate a journald +// input if needed. 
+func PluginV2(logger *logp.Logger, store cursor.StateStore) v2.Plugin { + logger = logger.Named(pluginName) + + return v2.Plugin{ + Name: pluginName, + Stability: feature.Stable, + Deprecated: false, + Info: "system-logs input", + Doc: "The system-logs input collects system logs on Linux by reading them from journald or traditional log files", + Manager: &cursor.InputManager{ + Logger: logger, + StateStore: store, + Type: pluginName, + Configure: configure, + }, + } +} + +// useJournald returns true if journald should be used. +// If there is an error, false is always returned. +// +// The decision logic is: +// - If UseJournald is set, return true +// - If UseFiles is set, return false +// - If the globs defined in `files.paths` match any existing file, +// return false +// - Otherwise return true +func useJournald(c *conf.C) (bool, error) { + cfg := config{} + if err := c.Unpack(&cfg); err != nil { + return false, nil + } + + if cfg.UseJournald { + return true, nil + } + + if cfg.UseFiles { + return false, nil + } + + globs := struct { + Paths []string `config:"files.paths"` + }{} + + if err := c.Unpack(&globs); err != nil { + return false, fmt.Errorf("cannot parse paths from config: %w", err) + } + + for _, g := range globs.Paths { + paths, err := filepath.Glob(g) + if err != nil { + return false, fmt.Errorf("cannot resolve glob: %w", err) + } + if len(paths) != 0 { + // We found at least one system log file, + // journald will not be used, return early + return false, nil + } + } + + // if no system log files are found, then use journald + return true, nil +} + +func toJournaldConfig(cfg *conf.C) (*conf.C, error) { + newCfg, err := cfg.Child("journald", -1) + if err != nil { + return nil, fmt.Errorf("cannot extract 'journald' block: %w", err) + } + + if _, err := cfg.Remove("journald", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("type", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("files", -1); err != nil { 
return nil, err + } + + if _, err := cfg.Remove("use_journald", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("use_files", -1); err != nil { + return nil, err + } + + if err := newCfg.Merge(cfg); err != nil { + return nil, err + } + + if err := newCfg.SetString("type", -1, "journald"); err != nil { + return nil, fmt.Errorf("cannot set 'type': %w", err) + } + + if err := cfg.SetString("type", -1, pluginName); err != nil { + return nil, fmt.Errorf("cannot set type back to '%s': %w", pluginName, err) + } + + return newCfg, nil +} + +func toFilesConfig(cfg *conf.C) (*conf.C, error) { + newCfg, err := cfg.Child("files", -1) + if err != nil { + return nil, fmt.Errorf("cannot extract 'journald' block: %w", err) + } + + if _, err := cfg.Remove("journald", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("type", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("files", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("use_journald", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("use_files", -1); err != nil { + return nil, err + } + + if err := newCfg.Merge(cfg); err != nil { + return nil, err + } + + if err := newCfg.SetString("type", -1, "log"); err != nil { + return nil, fmt.Errorf("cannot set 'type': %w", err) + } + + if err := cfg.SetString("type", -1, pluginName); err != nil { + return nil, fmt.Errorf("cannot set type back to '%s': %w", pluginName, err) + } + return newCfg, nil +} diff --git a/filebeat/input/systemlogs/input_linux.go b/filebeat/input/systemlogs/input_linux.go new file mode 100644 index 000000000000..5a98c270b97f --- /dev/null +++ b/filebeat/input/systemlogs/input_linux.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package systemlogs + +import ( + "fmt" + + "github.com/elastic/beats/v7/filebeat/input/journald" + v2 "github.com/elastic/beats/v7/filebeat/input/v2" + cursor "github.com/elastic/beats/v7/filebeat/input/v2/input-cursor" + conf "github.com/elastic/elastic-agent-libs/config" +) + +// configure checks whether the journald input must be created and +// delegates to journald.Configure if needed. +func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { + jouranl, err := useJournald(cfg) + if err != nil { + return nil, nil, fmt.Errorf("cannot decide between journald and files: %w", err) + } + + if !jouranl { + return nil, nil, v2.ErrUnknownInput + } + + journaldCfg, err := toJournaldConfig(cfg) + if err != nil { + return nil, nil, err + } + + return journald.Configure(journaldCfg) +} diff --git a/filebeat/input/systemlogs/input_other.go b/filebeat/input/systemlogs/input_other.go new file mode 100644 index 000000000000..ab21d3864b17 --- /dev/null +++ b/filebeat/input/systemlogs/input_other.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !linux + +package systemlogs + +import ( + "errors" + + cursor "github.com/elastic/beats/v7/filebeat/input/v2/input-cursor" + conf "github.com/elastic/elastic-agent-libs/config" +) + +func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { + return nil, nil, errors.New("journald is only supported on Linux") +} diff --git a/filebeat/module/system/README.md b/filebeat/module/system/README.md new file mode 100644 index 000000000000..2471264cfcf2 --- /dev/null +++ b/filebeat/module/system/README.md @@ -0,0 +1,14 @@ +# Journald tests (Debian 12) +The tests for the journald input (currently only used for Debian 12 +testing) require journal files (test files ending in `.journal`), those +files are generated using `systemd-journal-remote` (see the [Journald +input README.md](../../input/journald/README.md) for more details). + +The source for those journal files are the `.export` files in the test +folder. Those files are the raw output of `journalctl -o export`. They +are added here because journal files format change with different +versions of journald, which can cause `journalclt` to fail reading +them, which leads to test failures. So if tests start failing because +`journalctl` cannot read the journal files as expected, new ones can +easily be generated with the same version of journalctl used on CI +and the original dataset. 
diff --git a/filebeat/module/system/_meta/config.reference.yml b/filebeat/module/system/_meta/config.reference.yml index 3c7a0b43d499..04160dfb1bf1 100644 --- a/filebeat/module/system/_meta/config.reference.yml +++ b/filebeat/module/system/_meta/config.reference.yml @@ -7,7 +7,18 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Input configuration (advanced). Any input configuration option + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # Input configuration (advanced). + # Any input configuration option # can be added under this section. #input: @@ -19,6 +30,23 @@ # Filebeat will choose the paths depending on your OS. #var.paths: + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # A list of tags to include in events. Including 'forwarded' + # indicates that the events did not originate on this host and + # causes host.name to not be added to events. Include + # 'preserve_orginal_event' causes the pipeline to retain the raw log + # in event.original. Defaults to []. + #var.tags: [] + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: diff --git a/filebeat/module/system/_meta/config.yml b/filebeat/module/system/_meta/config.yml index c1fe882374d3..f95f3e5969df 100644 --- a/filebeat/module/system/_meta/config.yml +++ b/filebeat/module/system/_meta/config.yml @@ -7,6 +7,16 @@ # Filebeat will choose the paths depending on your OS. 
#var.paths: + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether to use journald + # to collect system logs. + # Authorization logs auth: enabled: false @@ -14,3 +24,20 @@ # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: + + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether to use journald + # to collect system logs. + + # A list of tags to include in events. Including forwarded + # indicates that the events did not originate on this host and + # causes host.name to not be added to events. Include + # preserve_original_event causes the pipeline to retain the raw log + # in event.original. Defaults to []. + #var.tags: [] diff --git a/filebeat/module/system/_meta/docs.asciidoc b/filebeat/module/system/_meta/docs.asciidoc index 6d9209eafe20..1aaca678963f 100644 --- a/filebeat/module/system/_meta/docs.asciidoc +++ b/filebeat/module/system/_meta/docs.asciidoc @@ -16,7 +16,7 @@ include::../include/gs-link.asciidoc[] === Compatibility This module was tested with logs from OSes like Ubuntu 12.04, Centos 7, and -macOS Sierra. +macOS Sierra. For Debian 12 Journald is used to read the system logs. This module is not available for Windows. @@ -58,11 +58,15 @@ include::../include/config-option-intro.asciidoc[] include::../include/var-paths.asciidoc[] +include::../include/use-journald.asciidoc[] + [float] ==== `auth` fileset settings include::../include/var-paths.asciidoc[] +include::../include/use-journald.asciidoc[] + *`var.tags`*:: A list of tags to include in events. 
Including `forwarded` indicates that the diff --git a/filebeat/module/system/auth/config/auth.yml b/filebeat/module/system/auth/config/auth.yml index 466b55078af5..3affe320fb0c 100644 --- a/filebeat/module/system/auth/config/auth.yml +++ b/filebeat/module/system/auth/config/auth.yml @@ -1,17 +1,33 @@ -type: log -paths: -{{ range $i, $path := .paths }} - - {{$path}} +type: system-logs +{{ if .use_journald }} +use_journald: true {{ end }} -exclude_files: [".gz$"] -multiline: - pattern: "^\\s" - match: after +{{ if .use_files }} +use_files: true +{{ end }} +tags: {{ .tags | tojson }} processors: - add_locale: ~ -tags: {{ .tags | tojson }} - publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} + +journald: + id: system-auth + facilities: + - 4 + - 10 + +files: + id: system-auth + paths: + {{ range $i, $path := .paths }} + - {{$path}} + {{ end }} + exclude_files: [".gz$"] + + multiline: + pattern: "^\\s" + match: after + diff --git a/filebeat/module/system/auth/ingest/entrypoint.yml b/filebeat/module/system/auth/ingest/entrypoint.yml new file mode 100644 index 000000000000..93869fd1486f --- /dev/null +++ b/filebeat/module/system/auth/ingest/entrypoint.yml @@ -0,0 +1,15 @@ +description: Entrypoint Pipeline for system/auth Filebeat module +processors: + - script: + source: | + if(ctx?.journald != null){ + ctx['auth_pipeline'] = '{< IngestPipeline "journald" >}'; + return; + } + ctx['auth_pipeline'] = '{< IngestPipeline "files" >}'; + return; + - pipeline: + name: "{{ auth_pipeline }}" + - remove: + ignore_failure: true + field: "auth_pipeline" diff --git a/filebeat/module/system/auth/ingest/pipeline.yml b/filebeat/module/system/auth/ingest/files.yml similarity index 88% rename from filebeat/module/system/auth/ingest/pipeline.yml rename to filebeat/module/system/auth/ingest/files.yml index c89ef94b28a7..39611f484a82 100644 --- a/filebeat/module/system/auth/ingest/pipeline.yml +++ b/filebeat/module/system/auth/ingest/files.yml @@ -18,18 +18,9 @@ processors: 
TIMESTAMP: (?:%{TIMESTAMP_ISO8601}|%{SYSLOGTIMESTAMP}) patterns: - '^%{TIMESTAMP:system.auth.timestamp} %{SYSLOGHOST:host.hostname}? %{DATA:process.name}(?:\[%{POSINT:process.pid:long}\])?:%{SPACE}%{GREEDYMULTILINE:_temp.message}$' - - grok: + - pipeline: description: Grok specific auth messages. - tag: grok-specific-messages - field: _temp.message - ignore_missing: true - patterns: - - '^%{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user)?%{DATA:user.name} from %{IPORHOST:source.address} port %{NUMBER:source.port:long} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?' - - '^%{DATA:system.auth.ssh.event} user %{DATA:user.name} from %{IPORHOST:source.address}' - - '^Did not receive identification string from %{IPORHOST:system.auth.ssh.dropped_ip}' - - '^%{DATA:user.name} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}' - - '^new group: name=%{DATA:group.name}, GID=%{NUMBER:group.id}' - - '^new user: name=%{DATA:user.name}, UID=%{NUMBER:user.id}, GID=%{NUMBER:group.id}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$' + name: '{< IngestPipeline "grok-auth-messages" >}' on_failure: - rename: description: Leave the unmatched content in message. diff --git a/filebeat/module/system/auth/ingest/grok-auth-messages.yml b/filebeat/module/system/auth/ingest/grok-auth-messages.yml new file mode 100644 index 000000000000..fc09abbff5e7 --- /dev/null +++ b/filebeat/module/system/auth/ingest/grok-auth-messages.yml @@ -0,0 +1,14 @@ +description: Journald Pipeline for system/auth Filebeat module +processors: + - grok: + description: Grok specific auth messages. 
+ tag: grok-specific-messages + field: _temp.message + ignore_missing: true + patterns: + - '^%{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user)?%{DATA:user.name} from %{IPORHOST:source.address} port %{NUMBER:source.port:long} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?' + - '^%{DATA:system.auth.ssh.event} user %{DATA:user.name} from %{IPORHOST:source.address}' + - '^Did not receive identification string from %{IPORHOST:system.auth.ssh.dropped_ip}' + - '^%{DATA:user.name} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}' + - '^new group: name=%{DATA:group.name}, GID=%{NUMBER:group.id}' + - '^new user: name=%{DATA:user.name}, UID=%{NUMBER:user.id}, GID=%{NUMBER:group.id}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$' diff --git a/filebeat/module/system/auth/ingest/journald.yml b/filebeat/module/system/auth/ingest/journald.yml new file mode 100644 index 000000000000..10e7ae96054e --- /dev/null +++ b/filebeat/module/system/auth/ingest/journald.yml @@ -0,0 +1,201 @@ +description: Journald Pipeline for system/auth Filebeat module +processors: + - set: + field: event.ingested + copy_from: _ingest.timestamp + - rename: + field: "journald.process.name" + target_field: process.name + - rename: + field: message + target_field: _temp.message + - pipeline: + description: Grok specific auth messages. + name: '{< IngestPipeline "grok-auth-messages" >}' + ignore_failure: true + - rename: + field: _temp.message + target_field: message + - grok: + description: Grok usernames from PAM messages. + tag: grok-pam-users + field: message + ignore_missing: true + ignore_failure: true + patterns: + - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}? 
by %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?(?:\(uid=%{NUMBER:_temp.byuid}\))?$' + - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}?$' + - 'by user %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?$' + - '%{BOUNDARY} user %{QUOTE}%{DATA:_temp.user}%{QUOTE}' + pattern_definitions: + QUOTE: "['\"]" + BOUNDARY: "(?- + if (ctx.system.auth.ssh.event == "Accepted") { + ctx.event.type = ["info"]; + ctx.event.category = ["authentication", "session"]; + ctx.event.action = "ssh_login"; + ctx.event.outcome = "success"; + } else if (ctx.system.auth.ssh.event == "Invalid" || ctx.system.auth.ssh.event == "Failed") { + ctx.event.type = ["info"]; + ctx.event.category = ["authentication"]; + ctx.event.action = "ssh_login"; + ctx.event.outcome = "failure"; + } + - append: + field: event.category + value: iam + if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - set: + field: event.outcome + value: success + if: ctx.process?.name != null && (ctx.message == null || !ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - set: + field: event.outcome + value: failure + if: ctx.process?.name != null && (ctx.message != null && ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - append: + field: event.type + value: user + if: ctx.process?.name != null && ['useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - append: + field: event.type + value: group + if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod'].contains(ctx.process.name) + - append: + field: event.type + value: creation + if: ctx.process?.name != null && ['useradd', 'groupadd'].contains(ctx.process.name) + - append: + field: event.type + value: deletion + if: ctx.process?.name != null && ['userdel', 'groupdel'].contains(ctx.process.name) + - append: + field: event.type + value: 
change + if: ctx.process?.name != null && ['usermod', 'groupmod'].contains(ctx.process.name) + - append: + field: related.user + value: "{{{ user.name }}}" + allow_duplicates: false + if: ctx.user?.name != null && ctx.user?.name != '' + - append: + field: related.user + value: "{{{ user.effective.name }}}" + allow_duplicates: false + if: ctx.user?.effective?.name != null && ctx.user?.effective?.name != '' + - append: + field: related.ip + value: "{{{ source.ip }}}" + allow_duplicates: false + if: ctx.source?.ip != null && ctx.source?.ip != '' + - append: + field: related.hosts + value: "{{{ host.hostname }}}" + allow_duplicates: false + if: ctx.host?.hostname != null && ctx.host?.hostname != '' + - set: + field: ecs.version + value: 8.0.0 + - remove: + field: event.original + if: "ctx?.tags == null || !(ctx.tags.contains('preserve_original_event'))" + ignore_failure: true + ignore_missing: true + - remove: + description: Remove the extra fields added by the Journald input + ignore_missing: true + field: + - journald + - process.thread + - syslog + - systemd + - message_id +on_failure: + - set: + field: error.message + value: '{{{ _ingest.on_failure_message }}}' diff --git a/filebeat/module/system/auth/manifest.yml b/filebeat/module/system/auth/manifest.yml index bf1a3623cf15..4b99d6407b76 100644 --- a/filebeat/module/system/auth/manifest.yml +++ b/filebeat/module/system/auth/manifest.yml @@ -12,6 +12,14 @@ var: os.windows: [] - name: tags default: [] + - name: use_journald + default: false + - name: use_files + default: false -ingest_pipeline: ingest/pipeline.yml +ingest_pipeline: + - ingest/entrypoint.yml + - ingest/files.yml + - ingest/journald.yml + - ingest/grok-auth-messages.yml input: config/auth.yml diff --git a/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json b/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json index ee5afe3f2356..6e2ffbeaa514 100644 --- a/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json 
+++ b/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json @@ -6,7 +6,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 0, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -27,7 +27,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 81, "process.name": "sudo", "related.hosts": [ @@ -52,7 +52,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 464, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -75,7 +75,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 570, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -95,7 +95,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 655, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -116,7 +116,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 736, "process.name": "sudo", "related.hosts": [ @@ -141,7 +141,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1121, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -164,7 +164,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": 
"system-logs", "log.offset": 1227, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -184,7 +184,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1312, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -205,7 +205,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1393, "process.name": "sudo", "related.hosts": [ @@ -230,7 +230,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1776, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -253,7 +253,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1882, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -273,7 +273,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1967, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -294,7 +294,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2048, "process.name": "sudo", "related.hosts": [ @@ -319,7 +319,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2426, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -342,7 +342,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", 
"host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2532, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -362,7 +362,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2617, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -383,7 +383,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2698, "process.name": "sudo", "related.hosts": [ @@ -408,7 +408,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3083, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -431,7 +431,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3189, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -451,7 +451,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3274, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -471,7 +471,7 @@ "event.module": "system", "event.timezone": "-02:00", "fileset.name": "auth", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3355, "message": "last message repeated 2 times", "process.name": "sshd", @@ -485,7 +485,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3414, "process.name": "sudo", "related.hosts": [ @@ -510,7 +510,7 @@ "event.timezone": "-02:00", 
"fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3977, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -533,7 +533,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4083, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -553,7 +553,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4168, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -574,7 +574,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4249, "process.name": "sudo", "related.hosts": [ @@ -599,7 +599,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4632, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -622,7 +622,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4738, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -642,7 +642,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4823, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -663,7 +663,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4904, "process.name": "sudo", 
"related.hosts": [ @@ -688,7 +688,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5289, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -711,7 +711,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5395, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -731,7 +731,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5480, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -752,7 +752,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5561, "process.name": "sudo", "related.hosts": [ @@ -777,7 +777,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5942, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -800,7 +800,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6048, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -820,7 +820,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6133, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -841,7 +841,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + 
"input.type": "system-logs", "log.offset": 6214, "process.name": "sudo", "related.hosts": [ @@ -866,7 +866,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6597, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -889,7 +889,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6703, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -909,7 +909,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6788, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -930,7 +930,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6869, "process.name": "sudo", "related.hosts": [ @@ -955,7 +955,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7254, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -978,7 +978,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7360, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -998,7 +998,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7445, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1019,7 +1019,7 @@ "event.timezone": "-02:00", 
"fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7526, "process.name": "sudo", "related.hosts": [ @@ -1044,7 +1044,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7911, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1067,7 +1067,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8017, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1087,7 +1087,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8102, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1108,7 +1108,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8183, "process.name": "sudo", "related.hosts": [ @@ -1133,7 +1133,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8564, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1156,7 +1156,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8670, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1176,7 +1176,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8755, "message": "subsystem request for sftp by user 
vagrant", "process.name": "sshd", @@ -1197,7 +1197,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8836, "process.name": "sudo", "related.hosts": [ @@ -1222,7 +1222,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9215, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1245,7 +1245,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9321, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1265,7 +1265,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9406, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1286,7 +1286,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9487, "process.name": "sudo", "related.hosts": [ @@ -1311,7 +1311,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9869, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1334,7 +1334,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9975, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1354,7 +1354,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": 
"system-logs", "log.offset": 10060, "process.name": "sudo", "related.hosts": [ @@ -1379,7 +1379,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11099, "message": "vagrant : (command continued) '/etc/metricbeat/metricbeat.yml)", "process.name": "sudo", @@ -1395,7 +1395,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11195, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1418,7 +1418,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11301, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1438,7 +1438,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11386, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1459,7 +1459,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11467, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1480,7 +1480,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11548, "process.name": "sudo", "related.hosts": [ @@ -1505,7 +1505,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11928, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1528,7 +1528,7 @@ "event.timezone": 
"-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12034, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1548,7 +1548,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12119, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1569,7 +1569,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12200, "process.name": "sudo", "related.hosts": [ @@ -1594,7 +1594,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12583, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1617,7 +1617,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12689, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1637,7 +1637,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12774, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1658,7 +1658,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12855, "process.name": "sudo", "related.hosts": [ @@ -1683,7 +1683,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 13241, "message": "pam_unix(sudo:session): session opened for user root 
by vagrant(uid=1000)", "process.name": "sudo", @@ -1706,7 +1706,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 13347, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1726,7 +1726,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 13432, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1747,7 +1747,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 13513, "process.name": "sudo", "related.hosts": [ @@ -1772,7 +1772,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 13898, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1795,7 +1795,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 14004, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1815,7 +1815,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 14089, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1836,7 +1836,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 14170, "process.name": "sudo", "related.hosts": [ @@ -1861,7 +1861,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", 
"log.offset": 14549, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1884,7 +1884,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 14655, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1904,7 +1904,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 14740, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1925,7 +1925,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 14821, "process.name": "sudo", "related.hosts": [ @@ -1950,7 +1950,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15203, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1973,7 +1973,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15309, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1993,7 +1993,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15394, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -2014,7 +2014,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15475, "process.name": "sudo", "related.hosts": [ @@ -2039,7 +2039,7 @@ "event.timezone": "-02:00", "fileset.name": 
"auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15860, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -2062,7 +2062,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15966, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -2082,7 +2082,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16051, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -2103,7 +2103,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16132, "process.name": "sudo", "related.hosts": [ @@ -2128,7 +2128,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16517, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -2151,7 +2151,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16623, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -2171,7 +2171,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16708, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -2192,7 +2192,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16789, "process.name": 
"sudo", "related.hosts": [ diff --git a/filebeat/module/system/auth/test/debian-12.export b/filebeat/module/system/auth/test/debian-12.export new file mode 100644 index 0000000000000000000000000000000000000000..583416f6c7b5e59569802999863ba1873e41600e GIT binary patch literal 8838 zcmeI2TXW(@7RUEl`z2I)UPIs2jY|(KmTgi7Ib7yasV!Bj)$)+B!3%h1^6mGuuC{|Q zB)h3(V;(@LyQOZ?zjOH?l4LNSOyfzo34@#%MzTCmR5LBO3ak{A=1K(|5o=EuAwEq* zn%W!-VNxqJLn@HQV1ZIWIL>p8_32%Rjn*l%hNP$4P!OVxU@Ao;WEfeF3OK-bTu5$KO}>AJc~Zeyubgg zd_649YMC#VHuJn>`f)moMrX3L}B+~v+i!v z#j;yA*Ap6COyc=<*e#b?PRVuYZ0!0=F}=Fyd_07kK5iL|&S2?L_?^3C&;O!t)_S@1 zf4pBWzWnPuFS(4TvvGeEg;mQ*X48dUo?y~Gkm2}UJm}Ac@!0)1hB2<;MN?{-!l_~u zX~7Ve0YWMl^=hx7FAYxtk5dVI)-ug_EfcdaE((|J;!Z}v{^s-O5=zYeYfBN)wb=ke7 zUnw4?7xC~S3;z9W{dxMs_sdm(jO@qn-(8;F-3`5@oozCW=aWJ7DmefyY?`AOCkHYv z1=ATN1>sa^qp3kUmq57S7+WbR%aNoxg&qFFJcB@CMnr%J3gD;QNjR7ouq>Fk_6qOz z@C|Q=c)fT3I(Tzq31^HLguzv1u0lu{3_gJ50&&hMMg`&}ro~3Ls zDii}sxh6c2NMyOtD(4QW-F~>3CzV7Z${fqP{4gU(5=EtIDW%Qf8>N2+N{@H4$5BY| zD-?3sQaD_G(YK4te}L%0-`dUAFWu@6@|ctgwq=Pr+$4DxO*||=FIQieZ8Wjhb+((G zP@X+^wwCMM)KF&B@KhQKP*NFWMrp*Ep-O28BKJVar3kQ6)IqlkN-h);Qc2m6 z=mNjWi@rhm7lraUAM4es4YjL8p@Ioacym;>?DU|bM;>|5bG^8A(eY#dS={7hgoU7_ za-Of!S?+p>9o2I#~D+@=s5nH*`Hx%l(tfELKa1T(NAFZj+tKwip;Yp>Ey5NzJ){1d54|AT<` zSNhK1gHCoBoo?4tC61*tIE+q=!9nVb*K``n4429{I&o%9V35c`DkjFoq%YG6xiqB+ zIel+uWJ?s0fkL7cdc#)!{p0ygFdGL*yD!VPzZ7ftN}^6y#t4Dwa>{af)SsUXX9Ck>twGHrCJr{vBdn4 zt!jz6IBX>5LSZgWl~bRB`U>K%%V6sJ*OIfFU?n&|n`?JtH=D(3dBQpyv$_Y3ydJyb za-}Z)Yyk9k%K6T)7xy^$#2W5K0PguSkl63Hn=bKB{p zsK7|fL~gX=fwG8_Kr*2?xVl$8yZqlzCJW~c*ALvAz1iKxR2iw)DnW?&k4rYTnxGt4B? 
z>3#uC63mH`)Wr_F1T+a2GLTZXk$>0<-2Zo`q25zFO3TkF1Sh;Nx^8RBKdm0tOMP1d z^$5|jPMn=U>3OlNL4AI*!S!~LR+Z(f{%l89)#L8oPETs%1ZqbJs*dpIgL>69m{051 zMa{mR)Ju7ay}(ckS5RTa@VAaU(6HT6#^Emt8e-@aGp`DT z+%nL literal 0 HcmV?d00001 diff --git a/filebeat/module/system/auth/test/debian-12.journal b/filebeat/module/system/auth/test/debian-12.journal new file mode 100644 index 0000000000000000000000000000000000000000..3195198e604154f8bd87062eb5c023fe2edd6f7d GIT binary patch literal 8388608 zcmeF)e{k3JeFyNbf{NPOw6@5!Yco`gzaSw$$Onvq7)UCRlmv%b7XySsN0Ec7tmi6i zr=^qISbyTS-Z))5ymfc<=xw=M>vXrRxb1pa^=duaxodAn*Xv}rT@Mk>vL9BbT3Bn9=<6S^ZPic@DMd zDOH(g`*m3kb^e(izboHos8rt-ssB7`Sq7)afef` z&dx^`pLaYoffs+7-ESg?HhcJ^Th7g%&&c6d9Nh|=CT0Cb4tKTQ{Qe#}+z8}IZAY@#Ph`38XQKE2N`9S3mb%9)uVvR?aWq>maNA*RMD$d%hyax}V{$-;#g*6gk{_gU#QzAoGhHZ2WM~ zPs{&3B67I(N^Sn${CbQWtiS#F*Q=3(jUVj%{A;u4Gjg!;gPq@<|NT93u009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1WH$6 zcYTsJ7g@TU>WlyZ0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAVA=B1P)!D{l2Qm)2U$&0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAJJc0Q~8`A=j!>eXC0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C7%2%MJHc6Y0EZ?2zoB#m=1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0w)So9BE7+h&=JEBLV~n5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1coE<;*Ci}|fB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyKwz{54xW{yOGJ+Ldg>D(K!5;&auld~SN8cbvK;rIX95HW5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXFl!HL~jY-;OWI6P#M*;*05FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+Kq(7UH)X#cDzcP&)f)i<1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAVA<`0xeb9?`MoW*%*%z zAV7cs0RjXF5FkK+009E!EwJsEjp+s=%X>GvCqRGz0RjXF5FkK+0D*E5I8>AU|LDka z>06%!2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZU?>8;M;p`OkwcklECB)p2oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfWXKL?0-G`{?5peUqM9z1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyPn7Fix$>yiKg0;MN#aB_A(jV!$mbwq#w z0RjXF5FkK+009C71{XMdZua-v$iZhAL4W`O0t8A=VABQJ=d;Mt>rh7o2oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkLHP}rJ(zg8p@0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!8B8K*hA|=jS7f&pReSfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C7CZ52aOOtfdkrPkAjtLMTK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UATTxp2WDjN2NyXuU09U>0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+!1xr{){vwdiyWW3v?~Gx2oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7e?=?ko@OVY`ar(elT0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pkr6;iK>LhI{vh+IC5di`O2oNAZfB*pk1PBly zK!5-N0t5&UAV7csfiovCts(pWXOU-aWh)UNK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5Ey-dO>?uKpNSm(6|F#k009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXFl$yYv^Rw?Ch%B}K^h1CE0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t8MbP82!YDzfxC)DZy!1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlqTws2Ek`9U-e1;JO2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNYufdg}sw6Vz2>{3?*2oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U 
zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyFjRq-tF!l;iyZ2F;|UNTK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAaJSzy%%TSzZ`k0^Nb@v zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72$ZY9p=*+~^~iGFhrS6AAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z0D;pNsGgPme#6MquVf|x0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV8oz1S%#aX`_+l(X}oK5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72%KDC_v|D+5qa`a9wk74009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfIy+J{haLkk0Y57AV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7e?*a%d-FG&}U9Gfnz zN`L?X0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7P84XLnZ18* zry(&o$B9EW(5di`O2oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0_7%f@TTnjMkC9ufBh05K!5-N0t5&UAV7cs z0RjXF5FkK+009C72uy5&ZBw)V-xWErHSC@M0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF 
z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjZZtH9m`NxHSj@w!j@B0zuu0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0%Iw# z{gNbIK5{I3u{Hq$1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV6Ru3bfBo(rrghL<4&!K!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1WH?A z-G$lvBSe;Vx4I)hfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!Cu=3GBZiNsC2}TxFFA5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5;&(-xSS{rq1c^0W(?NPqwV0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNX_fyzsgw9&}&=vtQq2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0Rp2Xu=3*U=T9R?YaMk75FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5;&(-+wET4Opn^7Jd2Nq_(W0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBl~xxoChlJrF6$wzsV009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0tAW$ww;}%4@DNAcT9i)0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXFl)u3IOS9is6j}Z|vI7F8BT#*F_WFt}oz8SZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXFguvGKW$z~(8IMZ>1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ 
zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t7~0 z;PACcS~POxS5T1v0RjXF5FkK+009D{Bd~K`_V<~{(OE@R0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjZdL162(N!n&)IrOYY0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV6Sv0%zsZ*XOx- z%{wl>|E8`<#Z~v7m!uCJe0!GX=L5QKZfoo8YQ3eYc2&>Xnudnet9lx`>#CDEjjLDJ zG*qu@m|b79c5Qb}cRv2?Jgf6u{PlnQ%D+7L#Fz5J!lu_6(+{I=^tT}&u=Lx#+rIXb zhWlfDWb{{gQBi+O7c6XPUDn*+Vtr4;n$?YKd+KWDtgY+mu5GOCnccm%yL(P;&D#1k 
zjcJR~Uu|CaffpuMAAO~}F4n0ylJ|GKZh9*ET#*m_v+o{xZs&iz^vReP8GWuS?sNHy zWu2`{o4Yy|wzW4mZTX!oAM5#O-=yfLAusULpL~D$c|ZN(`=efO^mD@eLLvJ3KtAw0 zfA`6+?A}~8HReS|KRfe&rsOBum$$YpZ|%IjDPK7Hi0AG1tKQrD!!-~6bJS_MI&+VC zvHqfbz~`_0)HRnZn*Ny>9~tZKFIvBAMc)(Fw^*!uT|VKt%FBNA>(94sjrrSVWb5{i z?|-a#-Rd7-{_K_4zWYFokBoH-@5tuIzPlEmeBI>U-#hf$d!9KK^AE(jedGHdi*-Mg z52%0Uma3|Q`#u-rBV*mheBFW9QAhKV)@3VJb}ejM*4ezWv+3TAtMA#kcKyblH5c^H z4TtRND-@nRXUDy}-upj)7R8gcFs{p)TQ;m;-II=pzT*1)+C?utJ@2!B^NFb2Qk6eHC)7Fabw@t% z)y0=QHskr8IE?9q=xa~$^R#e5dsp+KMa>I4TW@P_>bu9ppKNMzU*DVGF#X7?fBVZQ ze0W~o*9mow_w~Vi;QB9a-|>SVeE#{E7a4uMQsmXQ%_VJ%yA~~2*t(?u$!9Jf@K};ZOc3R z?{AAR!akN(;VZ4X7gX>)U*f%(~)xSnpy2Y#rs;qiZ}*>Nc5MMgjE zc|YgoC-VKZ-qPH6yIa)Syu4}4mUU~6U*FlPg~HOu7q4Hx`iT4AlMh|{*+2j0 zQ%~f}6sqDn?wjBLSo9H}&mVo{(J5Cv^YLXdJ~A8*7x&QzoW9-{E?(ZYqP=NW_L(Sq z#rL;0b4$Z#^<&%6GPYVq2`!enWm8omh9E&)f5Xv#y(U$HS}V{YK1- zj6SR1nbnCWVBo&AVp(h7rTwOT)#ru2dRIr)zSZCOgQ&Z|xUc>?eJ8`OD*x5qY=v;>&KJD%Hy?bk^U*7R6zfM0 z(ntT7j2)c|mbN!_^lYk}J-f1c_T2iqxwUgD^8(RdeBJPP+yAk1=TUN1bpn7}5Dp?@ z0BNT1ayY5DC=Nc zKtPmDIUoW-CBPi;;}UaNQUMx-kjYs#2|$Br5^YgYH7zV5Ev zq9rrSlZN@-pUTbd>8&-3*}Xjra)tb`{IKG%Vs24SZ(pvZRBUg_m0M47L~iVX#n#qB zM{dfvF~!!l5%UZ4kDI%+XWZ~{%}bjLhs`{A;`oER+CRChcm9-*9;Wj^=&~eW>Yks`#vEPsLZDt@(A^{Q)WP{U5$1|Jw(zdnWS9ACSt| zFm85c+$i3hE+DSsl}*m?x^nl;n}$4n{pCBCPaXN}_Q&6J-2UJC@+~uN*z+G=-f7b4 zKMvjWtY1DD_j_^Q8TVIlpBMKxaX&Tgm8>Sm{mdQdIPNd@PxIiI_s6_A=HruI&C6WuM2qZ_w-6am>4xr{kFSl$tW9m~X61$1(oz|BmeQ7>C!S;~4Kw-X!}x z#+m4Eqn}@q`rqhZXQcU1^jmAvar7UjZ^SQ4i|IJ(SB$6GRW>!P+BrKv z>e=2~WygEJFMB3c%Tz$@(tFHOY?RQ1G@Esste`}jKt1p);)e*vakRHZ={VZUs&pLndhWZk^P^s^Ovh0#>ieA)x$OL?ANBpsjvvTAuTR&s z+s37vwvp+s-pF)Y?7ng3oUxJVR&HdvK_A?>a?ahzbT@Bgx*hk}xNe|x8O&`8&)-NyJ3ixHU-#m^Tz2cTfw8)!RO-k_zA-+p{iokOcJ#Pk?-2P$|Jo?u zG(MN5vVRdT)-_(9eC-2wUV6oO5ijPU4dd0VYb;qfe__wkg|&5!h#%wsyw(5t&;_?l zt=(5g|KBkFz~g^&saVPvo3meL>c{^${^uK>u8#k|nzr5hE7w*({!hN>xJNf%w&zj* z-^c&;?lbDf`&DTjDcb4Y+hqM%w6m|J<7f|CY@2-^^>kV~j(WB-9Y?*W??<8@M*WEU zj2QRV?3%@odKL>lOD-O{+sr%LV|lzO@{RQQU9xmhKkC=#dV6~M7w5`_i1^V@tsZ*u 
zH)b3-`l={jxJ3N9b@A8jrgyybQAq8-QYkx{x+PL-p{5kzw@M$ekE&fwe^d- zekHek_Ib41lhbjuvwPEVw3FNp+4)h=Pfy2D9|xuNh^QCwy?3VX?efD)FTZcMrLU&L zrid5)O1vNG-n@M@=W{7RoFDb_=yQHG{;1U_|2pDDxf@@pD^sAGFyv+H=+}{#xTqeS031=9^L9mZjsU7x8`7x96xI zZ?_#sz5RQO`&Rq0xc+z@$l-4-?*CeS7+`z|&-&MaeCx|wpT5!YfN}rE%G>BV2YP=X zhk?>J`dr*hW*~?9`a94(D!gMJ5clh+e)~5+nm7Knr{nYZy*c8?`;o4?Z?%xm$Nrvp zKjQsluVY?&>Ah!-j&$*Rb;Ed-{iqv=SJ|sn`M3AVi*CL2*i+_ZNB#e;#p}q%$@qP2 z`j{gQ96xn>d5>awx9J-~$LH-S!~ti%dg4i|cRVK|=023gsY_p5--rd#Qs41U>~+ak zZi;mA8s#a~m!~{ItL`_Oc;w+@C->_`c}AoFN3S~Xp;LPA+IrHySvjtb@~oFWOAzj# zNaubc1eOZ@WeCW!x&;B-+X(F4Z zh`*vy{3E86FFVU*Xap~(j_V(pBCTEa?Ce{fSvDmCuc+Tw+aITTN|Z15v;2IA(`P+= z*V(o2GxkwL`66zli+$Xq(hNA>pPWt{bk6!liU7|d(@1tS7+WyY!_RjU<#>T3?*Y<0D>hm|w zh`90EFmBeKrY|)iQDGZ;uO|t$0=R^^?f>>_~Eaf-G1;( zm)slaVjoMxazs0feWR5YlXi{PHJ;&%yB9B>*RybF%do|>dKPt8v#9rdpy%u>S57*< zFY=Gq0rRg(WGTWU?vKy9{L>dLyZFyviFj+$euCP5l$1Wob8I@Xc;9(D+_}e(K2=MX z_VEl@o@{^WdRH9rJ5#`q-T&{8Kl!Q8z7p}{{-aU+RPQ&qj$0Vfy52g~t|N1mo@i8c zjdx_OvO9Xr;pKBGJDV#tzk5OFti`$E-F>qvn~AIYoTpbGbVwW1!zIScFF&;Bo!hj3 z>4)JG>w}HTo2{3(ma<)L{q;H;z_HtJ+P(P&58o2;Vm-29y!hQ~ak!+fIstg~NrRH4{CvzTwmceRuXEp4-Bb#}D2cXa2Qi|wVB zwvN`eTE7(Q7CU`^%+!fD9(hAtU);Ag&iBps4OaHuls67F7dzTJGSOJKhyf~|6S-j8 zgr8N`As=|TZ*V2=`gO=VcE6$L=G%Wi`0cJ+)bHP`->+A{Kd=6Gt$02t=5g`dPRzq% zUK8`Em{-I+CZ1o3`9qAmG2X{>Pcgp6a~v@~KKGAVe;ec4xP7wY7>~|P$1$EfmyTl` zST-a(KlFT)^iONjar7sXhGyw&?S5Ex9PMgSI*xX+G95?#&8PLqs8>^v8^N(Se?O$|cu-@5C9~N4)qx z8^){61FFw?)B;D`_}#Jh)oVWUgB^Fz2FG&n_?@It+~iQ%gHpoaGZpMF`?X)c z&-d>R=BB781H{|l_|h?=Wxeqw`s=l$UYI-Nra4c9M^ma#jq*vqZNsQ-|&H$OG3!+~_X` zjJwXfrN876KgPKst#_Pr(>;g(A>v2>F<|_4$GOhd=I+jpmag_Y|KyRFzY zySY6-dvHlyylE|tUCJkIBbf3u2H@*k6GBgG*_(@BXSEn7j%!RY{l6( zmu|AlKefg6#dv!6X%o&}a{G1daedJrHp*w<-vQc7`S#XQb4PY#731lkG*OKoHoko7 z#~!VWr$68GbJaOS{dl_H%F>g~Ge)d>yW?rx_igO{`L{d%{!ho(`tfvmx_^mr=ecwo znq;M7^u8Z}EP-{=KJ*b{Y5I%f7a9!p@go@!q)pm-Y7}?<`A7D^WY^I0v$pdV zc18SHuUox#?dU6h@zZ-Ee$>}S@zXj)_0IuB%u3pb8{^~^m#y9Jr!#IiG2%wOZy2{W z4>^9`%=D0FSG=aOZz!KL_ulpkUmrC0!pNs7wTFiJ)W+ktmd~^lxb?BmpT6xEn`+M^ 
zMZeQHpLL!GFP5Jn$>)pt+2Irm2~Tc4Y4Rt3xz&&;f3)X@aqGwVb)Sb_=X-hMqp^{1 zd_P@Z=soV#?Y6%l@{N9CzM~oz4E+H8d^L44YMn5$h=BA+q}K+U2Ld^r2xd|7$vI%8$zWy;{8VK{&*`cE}&z zS9s!g@9d3q@!F_-)n(1{q&6)t)XFhDC0u#YzW?&vql2E0IQOPdJL+ zI^OwDpML5)e|qYhQJ#2hRIaJz@20uX|6Fb~td13GmbbhAKmxh<90c*bkG-TxF0`sOX1Q+>v5QE$(z@=4u`bA9E7xMlPD zszr?JpPvdi=v%*<_UvE&_*#@V=2wmKA9%iADi&Ic9i@CrHDvvK{aq=eXD+$z@ydLC zud&ZmyQcd2`j568H>U9M?;QJoJztOCx#D@(_?@QqeCwv!_Zjf?DbRYl#001D!-+GV>Ap-^s7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 
zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* 
z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA 
zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM o7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RxAD0FvT5cK`qY literal 0 HcmV?d00001 diff --git a/filebeat/module/system/auth/test/debian-12.journal-expected.json b/filebeat/module/system/auth/test/debian-12.journal-expected.json new file mode 100644 index 000000000000..ee0d8a69ba02 --- /dev/null +++ b/filebeat/module/system/auth/test/debian-12.journal-expected.json @@ -0,0 +1,383 @@ +[ + { + "event.action": "ssh_login", + "event.category": [ + "authentication", + "session" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "success", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Accepted publickey for vagrant from 10.0.2.2 port 48274 ssh2: ED25519 
SHA256:k1kjhwoH/H3w31MbGOIGd7qxrkSQJnoAN0eYJVHDmmI", + "process.args": [ + "\"sshd: vagrant [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: vagrant [priv]\"", + "process.name": "sshd", + "process.pid": 26538, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "10.0.2.2" + ], + "related.user": [ + "vagrant" + ], + "service.type": "system", + "source.address": "10.0.2.2", + "source.ip": "10.0.2.2", + "source.port": 48274, + "system.auth.ssh.event": "Accepted", + "system.auth.ssh.method": "publickey", + "system.auth.ssh.signature": "ED25519 SHA256:k1kjhwoH/H3w31MbGOIGd7qxrkSQJnoAN0eYJVHDmmI", + "user.group.id": "0", + "user.id": "0", + "user.name": "vagrant" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication", + "session" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "success", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Accepted password for vagrant from 192.168.42.119 port 55310 ssh2", + "process.args": [ + "\"sshd: vagrant [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: vagrant [priv]\"", + "process.name": "sshd", + "process.pid": 1710, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "vagrant" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "source.port": 55310, + "system.auth.ssh.event": "Accepted", + "system.auth.ssh.method": "password", + "user.group.id": "0", + "user.id": "0", + "user.name": "vagrant" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication" + ], + "event.dataset": "system.auth", + "event.kind": 
"event", + "event.module": "system", + "event.outcome": "failure", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Invalid user test from 192.168.42.119 port 48890", + "process.args": [ + "\"sshd: unknown [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: unknown [priv]\"", + "process.name": "sshd", + "process.pid": 1721, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "test" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "system.auth.ssh.event": "Invalid", + "user.group.id": "0", + "user.id": "0", + "user.name": "test" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "failure", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", + "process.args": [ + "\"sshd: root [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: root [priv]\"", + "process.name": "sshd", + "process.pid": 1723, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "root" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "source.port": 46632, + "system.auth.ssh.event": "Failed", + "system.auth.ssh.method": "password", + "user.group.id": "0", + "user.id": 
"0", + "user.name": "root" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "failure", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", + "process.args": [ + "\"sshd: root [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: root [priv]\"", + "process.name": "sshd", + "process.pid": 1723, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "root" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "source.port": 46632, + "system.auth.ssh.event": "Failed", + "system.auth.ssh.method": "password", + "user.group.id": "0", + "user.id": "0", + "user.name": "root" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "failure", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", + "process.args": [ + "\"sshd: root [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: root [priv]\"", + "process.name": "sshd", + "process.pid": 1723, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "root" + ], + 
"service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "source.port": 46632, + "system.auth.ssh.event": "Failed", + "system.auth.ssh.method": "password", + "user.group.id": "0", + "user.id": "0", + "user.name": "root" + }, + { + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 10, + "log.syslog.priority": 5, + "message": " vagrant : TTY=pts/2 ; PWD=/home/vagrant ; USER=root ; COMMAND=/usr/bin/emacs /etc/ssh/sshd_config", + "process.args": [ + "sudo", + "emacs", + "/etc/ssh/sshd_config" + ], + "process.args_count": 3, + "process.command_line": "sudo emacs /etc/ssh/sshd_config", + "process.name": "sudo", + "process.pid": 1582, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.user": [ + " vagrant", + "root" + ], + "service.type": "system", + "system.auth.sudo.command": "/usr/bin/emacs /etc/ssh/sshd_config", + "system.auth.sudo.pwd": "/home/vagrant", + "system.auth.sudo.tty": "pts/2", + "system.auth.sudo.user": "root", + "user.effective.name": "root", + "user.group.id": "1000", + "user.id": "1000", + "user.name": " vagrant" + }, + { + "event.category": [ + "iam" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "success", + "event.timezone": "-02:00", + "event.type": [ + "creation", + "group" + ], + "fileset.name": "auth", + "group.id": "1001", + "group.name": "test", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 10, + "log.syslog.priority": 6, + "message": "new group: name=test, GID=1001", + "process.args": [ + "/sbin/groupadd", + "-g", + "1001", + "test" + ], + "process.args_count": 4, + "process.command_line": 
"/sbin/groupadd -g 1001 test", + "process.name": "groupadd", + "process.pid": 1743, + "related.hosts": [ + "vagrant-debian-12" + ], + "service.type": "system", + "user.effective.group.id": "0", + "user.effective.id": "0", + "user.id": "1000" + }, + { + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Session 8 logged out. Waiting for processes to exit.", + "process.args": [ + "/lib/systemd/systemd-logind" + ], + "process.args_count": 1, + "process.command_line": "/lib/systemd/systemd-logind", + "process.name": "systemd-logind", + "process.pid": 316, + "related.hosts": [ + "vagrant-debian-12" + ], + "service.type": "system", + "user.group.id": "0", + "user.id": "0" + } +] \ No newline at end of file diff --git a/filebeat/module/system/auth/test/secure-rhel7.log-expected.json b/filebeat/module/system/auth/test/secure-rhel7.log-expected.json index 731b4db0423b..71cd8657c7bf 100644 --- a/filebeat/module/system/auth/test/secure-rhel7.log-expected.json +++ b/filebeat/module/system/auth/test/secure-rhel7.log-expected.json @@ -14,7 +14,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 0, "process.name": "sshd", "process.pid": 2738, @@ -47,7 +47,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 97, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -76,7 +76,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 209, "process.name": "sshd", "process.pid": 2738, @@ -109,7 
+109,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 306, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -138,7 +138,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 418, "process.name": "sshd", "process.pid": 2738, @@ -171,7 +171,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 515, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -188,7 +188,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 618, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -205,7 +205,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 760, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -222,7 +222,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 842, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -239,7 +239,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 993, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -268,7 +268,7 @@ ], "fileset.name": "auth", 
"host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1105, "process.name": "sshd", "process.pid": 2742, @@ -301,7 +301,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1202, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -330,7 +330,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1314, "process.name": "sshd", "process.pid": 2742, @@ -363,7 +363,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1411, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -392,7 +392,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1523, "process.name": "sshd", "process.pid": 2742, @@ -425,7 +425,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1620, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -454,7 +454,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1732, "process.name": "sshd", "process.pid": 2742, @@ -487,7 +487,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1829, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -516,7 +516,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + 
"input.type": "system-logs", "log.offset": 1941, "process.name": "sshd", "process.pid": 2742, @@ -549,7 +549,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2038, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -566,7 +566,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2141, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -583,7 +583,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2283, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -600,7 +600,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2365, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -617,7 +617,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2516, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -638,7 +638,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2628, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=216.160.83.58 user=root", "process.name": "sshd", @@ -655,7 +655,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - 
"input.type": "log", + "input.type": "system-logs", "log.offset": 2777, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -684,7 +684,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2889, "process.name": "sshd", "process.pid": 2754, @@ -717,7 +717,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2986, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -746,7 +746,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3098, "process.name": "sshd", "process.pid": 2758, @@ -783,7 +783,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3194, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -812,7 +812,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3306, "process.name": "sshd", "process.pid": 2754, @@ -845,7 +845,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3403, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -874,7 +874,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3515, "process.name": "sshd", "process.pid": 2758, @@ -911,7 +911,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", 
"log.offset": 3611, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -940,7 +940,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3723, "process.name": "sshd", "process.pid": 2754, @@ -973,7 +973,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3820, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1002,7 +1002,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3932, "process.name": "sshd", "process.pid": 2758, @@ -1039,7 +1039,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4028, "message": "Received disconnect from 216.160.83.58: 11: [preauth]", "process.name": "sshd", @@ -1056,7 +1056,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4119, "message": "PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=216.160.83.58 user=root", "process.name": "sshd", @@ -1081,7 +1081,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4259, "process.name": "sshd", "process.pid": 2754, @@ -1114,7 +1114,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4356, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1143,7 +1143,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + 
"input.type": "system-logs", "log.offset": 4468, "process.name": "sshd", "process.pid": 2754, @@ -1176,7 +1176,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4565, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -1193,7 +1193,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4668, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1210,7 +1210,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4810, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -1227,7 +1227,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 4892, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1244,7 +1244,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5043, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1273,7 +1273,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5155, "process.name": "sshd", "process.pid": 2762, @@ -1306,7 +1306,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5252, "message": "pam_succeed_if(sshd:auth): requirement 
\"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1335,7 +1335,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5364, "process.name": "sshd", "process.pid": 2762, @@ -1368,7 +1368,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5461, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1397,7 +1397,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5573, "process.name": "sshd", "process.pid": 2762, @@ -1430,7 +1430,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5670, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1459,7 +1459,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5782, "process.name": "sshd", "process.pid": 2762, @@ -1492,7 +1492,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5879, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1521,7 +1521,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5991, "process.name": "sshd", "process.pid": 2762, @@ -1554,7 +1554,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6088, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", 
"process.name": "sshd", @@ -1571,7 +1571,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6191, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1588,7 +1588,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6333, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -1605,7 +1605,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6415, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1622,7 +1622,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6566, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1651,7 +1651,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6678, "process.name": "sshd", "process.pid": 2766, @@ -1684,7 +1684,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6775, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1713,7 +1713,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6887, "process.name": "sshd", "process.pid": 2766, @@ -1746,7 +1746,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", 
"host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6984, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1775,7 +1775,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7096, "process.name": "sshd", "process.pid": 2766, @@ -1808,7 +1808,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7193, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1837,7 +1837,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7305, "process.name": "sshd", "process.pid": 2766, @@ -1870,7 +1870,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7402, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1899,7 +1899,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7514, "process.name": "sshd", "process.pid": 2766, @@ -1932,7 +1932,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7611, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -1949,7 +1949,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7714, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ 
-1966,7 +1966,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7856, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -1983,7 +1983,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7938, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=216.160.83.58 user=root", "process.name": "sshd", @@ -2000,7 +2000,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8087, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2029,7 +2029,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8199, "process.name": "sshd", "process.pid": 2778, @@ -2066,7 +2066,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8295, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2095,7 +2095,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8407, "process.name": "sshd", "process.pid": 2778, @@ -2132,7 +2132,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8503, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2161,7 +2161,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", 
"log.offset": 8615, "process.name": "sshd", "process.pid": 2778, @@ -2198,7 +2198,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8711, "message": "Received disconnect from 216.160.83.58: 11: [preauth]", "process.name": "sshd", @@ -2215,7 +2215,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8802, "message": "PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=216.160.83.58 user=root", "process.name": "sshd", @@ -2232,7 +2232,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8942, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -2249,7 +2249,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9093, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2278,7 +2278,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9205, "process.name": "sshd", "process.pid": 2785, @@ -2311,7 +2311,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9302, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2340,7 +2340,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9414, "process.name": "sshd", "process.pid": 2785, @@ -2373,7 +2373,7 @@ "event.timezone": 
"-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9511, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2402,7 +2402,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9623, "process.name": "sshd", "process.pid": 2785, @@ -2435,7 +2435,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9720, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2464,7 +2464,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9832, "process.name": "sshd", "process.pid": 2785, @@ -2497,7 +2497,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9929, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2526,7 +2526,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 10041, "process.name": "sshd", "process.pid": 2785, @@ -2559,7 +2559,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 10138, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -2576,7 +2576,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 10241, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 
user=root", "process.name": "sshd", @@ -2593,7 +2593,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 10383, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -2610,7 +2610,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 10465, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -2627,7 +2627,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 10616, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2656,7 +2656,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 10728, "process.name": "sshd", "process.pid": 2797, @@ -2689,7 +2689,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 10825, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", diff --git a/filebeat/module/system/auth/test/test.log-expected.json b/filebeat/module/system/auth/test/test.log-expected.json index 16e859a66d36..bfe6a5c44b0c 100644 --- a/filebeat/module/system/auth/test/test.log-expected.json +++ b/filebeat/module/system/auth/test/test.log-expected.json @@ -15,7 +15,7 @@ ], "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.offset": 0, "process.name": "sshd", "process.pid": 3402, @@ -53,7 +53,7 @@ ], "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "log", + 
"input.type": "system-logs", "log.offset": 152, "process.name": "sshd", "process.pid": 7483, @@ -89,7 +89,7 @@ ], "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.offset": 254, "process.name": "sshd", "process.pid": 3430, @@ -123,7 +123,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 324, "process.name": "sshd", "process.pid": 5774, @@ -160,7 +160,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.offset": 420, "process.name": "sudo", "related.hosts": [ @@ -185,7 +185,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "log", + "input.type": "system-logs", "log.offset": 522, "process.name": "sshd", "process.pid": 18406, @@ -214,7 +214,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.offset": 616, "process.name": "sudo", "related.hosts": [ @@ -239,7 +239,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "log", + "input.type": "system-logs", "log.offset": 735, "process.name": "sudo", "related.hosts": [ @@ -275,7 +275,7 @@ "group.id": "48", "group.name": "apache", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.offset": 860, "process.name": "groupadd", "process.pid": 6991, @@ -300,7 +300,7 @@ "fileset.name": "auth", "group.id": "48", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.offset": 933, "process.name": "useradd", "process.pid": 6995, @@ -323,7 +323,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1056, "process.name": "sshd", "process.pid": 
10161, diff --git a/filebeat/module/system/auth/test/timestamp.log-expected.json b/filebeat/module/system/auth/test/timestamp.log-expected.json index fd083732af6c..52b028dd3b00 100644 --- a/filebeat/module/system/auth/test/timestamp.log-expected.json +++ b/filebeat/module/system/auth/test/timestamp.log-expected.json @@ -7,7 +7,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.file.path": "timestamp.log", "log.offset": 0, "message": "pam_unix(sudo-i:session): session opened for user root by userauth3(uid=0)", @@ -32,7 +32,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.file.path": "timestamp.log", "log.offset": 118, "message": "user nobody logged out.", diff --git a/filebeat/module/system/syslog/config/syslog.yml b/filebeat/module/system/syslog/config/syslog.yml index e7f238d8af83..3bec875d272e 100644 --- a/filebeat/module/system/syslog/config/syslog.yml +++ b/filebeat/module/system/syslog/config/syslog.yml @@ -1,15 +1,44 @@ -type: log -paths: -{{ range $i, $path := .paths }} - - {{$path}} +type: system-logs + +{{ if .use_journald }} +use_journald: true {{ end }} -exclude_files: [".gz$"] -multiline: - pattern: "^\\s" - match: after + +{{ if .use_files }} +use_files: true +{{ end }} + processors: - add_locale: ~ - add_fields: target: '' fields: ecs.version: 1.12.0 + +journald: + id: system-syslog + facilities: + - 0 + - 1 + - 2 + - 3 + - 5 + - 6 + - 7 + - 8 + - 9 + - 11 + - 12 + - 15 + +files: + id: system-syslog + paths: + {{ range $i, $path := .paths }} + - {{$path}} + {{ end }} + + exclude_files: [".gz$"] + multiline: + pattern: "^\\s" + match: after diff --git a/filebeat/module/system/syslog/ingest/entrypoint.yml b/filebeat/module/system/syslog/ingest/entrypoint.yml new file mode 100644 index 000000000000..e9f3fbc39777 --- /dev/null +++ 
b/filebeat/module/system/syslog/ingest/entrypoint.yml @@ -0,0 +1,15 @@ +description: Entrypoint Pipeline for system/syslog Filebeat module +processors: + - script: + source: | + if(ctx?.journald != null){ + ctx['syslog_pipeline'] = '{< IngestPipeline "journald" >}'; + return; + } + ctx['syslog_pipeline'] = '{< IngestPipeline "files" >}'; + return; + - pipeline: + name: "{{ syslog_pipeline }}" + - remove: + ignore_failure: true + field: "syslog_pipeline" diff --git a/filebeat/module/system/syslog/ingest/pipeline.yml b/filebeat/module/system/syslog/ingest/files.yml similarity index 100% rename from filebeat/module/system/syslog/ingest/pipeline.yml rename to filebeat/module/system/syslog/ingest/files.yml diff --git a/filebeat/module/system/syslog/ingest/journald.yml b/filebeat/module/system/syslog/ingest/journald.yml new file mode 100644 index 000000000000..5d011784154a --- /dev/null +++ b/filebeat/module/system/syslog/ingest/journald.yml @@ -0,0 +1,29 @@ +description: Journald Pipeline for system/syslog Filebeat module +processors: +- set: + field: event.ingested + copy_from: _ingest.timestamp +- set: + field: "process.pid" + value: '{{ journald.pid }}' +- set: + field: event.kind + value: event +- append: + field: related.hosts + value: "{{host.hostname}}" + if: "ctx.host?.hostname != null && ctx.host?.hostname != ''" + allow_duplicates: false +- remove: + description: Remove the extra fields added by the Journald input + ignore_missing: true + field: + - journald + - process.thread + - syslog + - systemd + - message_id +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/filebeat/module/system/syslog/manifest.yml b/filebeat/module/system/syslog/manifest.yml index 39a34e56ca3a..5112ddc5c15a 100644 --- a/filebeat/module/system/syslog/manifest.yml +++ b/filebeat/module/system/syslog/manifest.yml @@ -8,6 +8,14 @@ var: os.darwin: - /var/log/system.log* os.windows: [] + - name: use_journald + default: false + - name: 
use_files + default: false + +ingest_pipeline: + - ingest/entrypoint.yml + - ingest/files.yml + - ingest/journald.yml -ingest_pipeline: ingest/pipeline.yml input: config/syslog.yml diff --git a/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json b/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json index a5957f19b948..eb8947f85c13 100644 --- a/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json +++ b/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json @@ -6,7 +6,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.flags": [ "multiline" ], @@ -26,7 +26,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 907, "message": "2016-12-13 11:35:28.421 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine updateAllExceptProduct:] KSUpdateEngine updating all installed products, except:'com.google.Keystone'.", "process.name": "GoogleSoftwareUpdateAgent", @@ -42,7 +42,7 @@ "event.module": "system", "event.timezone": "-02:00", "fileset.name": "syslog", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1176, "message": "--- last message repeated 1 time ---", "service.type": "system" diff --git a/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json b/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json index 6f12a7a5656c..a1620750ff15 100644 --- a/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json +++ b/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json @@ -6,7 +6,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 0, "message": "2016-12-13 11:35:28.419 
GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp performSelfUpdateWithEngine:] Finished self update check.", "process.name": "GoogleSoftwareUpdateAgent", @@ -23,7 +23,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.flags": [ "multiline" ], @@ -43,7 +43,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1127, "message": "2016-12-13 11:35:28.421 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine updateAllExceptProduct:] KSUpdateEngine updating all installed products, except:'com.google.Keystone'.", "process.name": "GoogleSoftwareUpdateAgent", @@ -60,7 +60,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 1396, "message": "2016-12-13 11:35:28.422 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSCheckAction performAction] KSCheckAction checking 2 ticket(s).", "process.name": "GoogleSoftwareUpdateAgent", @@ -77,7 +77,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.flags": [ "multiline" ], @@ -97,7 +97,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 2833, "message": "2016-12-13 11:35:28.446 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] +[KSCodeSigningVerification verifyBundle:applicationId:error:] KSCodeSigningVerification verifying code signing for '/Applications/Google Chrome.app' with the requirement 'anchor apple generic and certificate 1[field.1.2.840.113635.100.6.2.6] exists and certificate leaf[field.1.2.840.113635.100.6.1.13] exists and 
certificate leaf[subject.OU]=\"EQHXZ8M8AV\" and (identifier=\"com.google.Chrome\")'", "process.name": "GoogleSoftwareUpdateAgent", @@ -114,7 +114,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 3377, "message": "2016-12-13 11:35:29.430 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] +[KSCodeSigningVerification verifyBundle:applicationId:error:] KSCodeSigningVerification verifying code signing for '/Applications/Google Drive.app' with the requirement 'anchor apple generic and certificate 1[field.1.2.840.113635.100.6.2.6] exists and certificate leaf[field.1.2.840.113635.100.6.1.13] exists and certificate leaf[subject.OU]=\"EQHXZ8M8AV\" and (identifier=\"com.google.GoogleDrive\")'", "process.name": "GoogleSoftwareUpdateAgent", @@ -131,7 +131,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.flags": [ "multiline" ], @@ -151,7 +151,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 5675, "message": "2016-12-13 11:35:30.116 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher start fetch from URL: \"https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822\"", "process.name": "GoogleSoftwareUpdateAgent", @@ -168,7 +168,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6055, "message": "2016-12-13 11:35:30.117 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher(PrivateMethods) launchedHelperTaskForToolPath:error:] KSOutOfProcessFetcher 
launched '/Users/tsg/Library/Google/GoogleSoftwareUpdate/GoogleSoftwareUpdate.bundle/Contents/MacOS/ksfetch' with process id: 21414", "process.name": "GoogleSoftwareUpdateAgent", @@ -185,7 +185,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6436, "message": "2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher sending both request and download file location to the helper.", "process.name": "GoogleSoftwareUpdateAgent", @@ -202,7 +202,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6719, "message": "2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] KSSendAllDataToHelper() KSHelperTool wrote 2383 bytes to the helper input.", "process.name": "GoogleSoftwareUpdateAgent", @@ -219,7 +219,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 6943, "message": "2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] Closing the file handle.", "process.name": "GoogleSoftwareUpdateAgent", @@ -236,7 +236,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7166, "message": "2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher fetching from URL: \"https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822\"", "process.name": "GoogleSoftwareUpdateAgent", @@ 
-253,7 +253,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7543, "message": "2016-12-13 11:35:30.149 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] KSHelperReceiveAllData() KSHelperTool read 2383 bytes from stdin.", "process.name": "ksfetch", @@ -270,7 +270,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 7722, "message": "2016-12-13 11:35:30.151 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher received a request: { URL: https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822 }", "process.name": "ksfetch", @@ -287,7 +287,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8050, "message": "2016-12-13 11:35:30.151 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher received a download path: /tmp/KSOutOfProcessFetcher.QTqOLkktQz/download", "process.name": "ksfetch", @@ -304,7 +304,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8251, "message": "2016-12-13 11:35:30.152 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() ksfetch fetching URL ( { URL: https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822 }) to folder:/tmp/KSOutOfProcessFetcher.QTqOLkktQz/download", "process.name": "ksfetch", @@ -321,7 +321,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8631, "message": "2016-12-13 11:35:30.152 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Setting 
up download file handles...", "process.name": "ksfetch", @@ -338,7 +338,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8787, "message": "2016-12-13 11:35:30.348 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] -[FetchDelegate fetcher:finishedWithData:] Fetcher downloaded successfully data of length: 0", "process.name": "ksfetch", @@ -355,7 +355,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 8993, "message": "2016-12-13 11:35:30.348 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() ksfetch done fetching.", "process.name": "ksfetch", @@ -372,7 +372,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9136, "message": "2016-12-13 11:35:30.351 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher is exiting.", "process.name": "ksfetch", @@ -389,7 +389,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.flags": [ "multiline" ], @@ -409,7 +409,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 9540, "message": "2016-12-13 11:35:30.354 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher(PrivateMethods) helperDidTerminate:] KSOutOfProcessFetcher fetch ended for URL: \"https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822\"", "process.name": "GoogleSoftwareUpdateAgent", @@ -426,7 +426,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": 
"system-logs", "log.flags": [ "multiline" ], @@ -446,7 +446,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11060, "message": "2016-12-13 11:35:30.356 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOmahaServer updateInfosForUpdateResponse:updateRequest:infoStore:upToDateTickets:updatedTickets:events:errors:] Response passed CUP validation.", "process.name": "GoogleSoftwareUpdateAgent", @@ -463,7 +463,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11357, "message": "2016-12-13 11:35:30.381 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateCheckAction(PrivateMethods) finishAction] KSUpdateCheckAction found updates: {( )}", "process.name": "GoogleSoftwareUpdateAgent", @@ -480,7 +480,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11599, "message": "2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSPrefetchAction performAction] KSPrefetchAction no updates to prefetch.", "process.name": "GoogleSoftwareUpdateAgent", @@ -497,7 +497,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 11823, "message": "2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSMultiUpdateAction performAction] KSSilentUpdateAction had no updates to apply.", "process.name": "GoogleSoftwareUpdateAgent", @@ -514,7 +514,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12055, "message": "2016-12-13 11:35:30.384 
GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSMultiUpdateAction performAction] KSPromptAction had no updates to apply.", "process.name": "GoogleSoftwareUpdateAgent", @@ -531,7 +531,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12281, "message": "2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp(KeystoneDelegate) updateEngineFinishedWithErrors:] Keystone finished: errors=0", "process.name": "GoogleSoftwareUpdateAgent", @@ -548,7 +548,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 12522, "message": "2016-12-13 11:35:30.385 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine(PrivateMethods) updateFinish] KSUpdateEngine update processing complete.", "process.name": "GoogleSoftwareUpdateAgent", @@ -565,7 +565,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.flags": [ "multiline" ], @@ -585,7 +585,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 13788, "message": "2016-12-13 11:35:31.302 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentUploader fetcher:finishedWithData:] Successfully uploaded stats to { URL: https://tools.google.com/service/update2 }", "process.name": "GoogleSoftwareUpdateAgent", @@ -602,7 +602,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.flags": [ "multiline" ], @@ -622,7 +622,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", 
+ "input.type": "system-logs", "log.offset": 14537, "message": "2016-12-13 11:35:32.508 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp(KeystoneThread) runKeystonesInThreadWithArg:] Finished with engine thread", "process.name": "GoogleSoftwareUpdateAgent", @@ -639,7 +639,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 14773, "message": "2016-12-13 11:35:32.825 GoogleSoftwareUpdateAgent[21412/0x7fffcc3f93c0] [lvl=2] -[KSAgentApp checkForUpdates] Finished update check.", "process.name": "GoogleSoftwareUpdateAgent", @@ -656,7 +656,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 14975, "message": "objc[85294]: __weak variable at 0x60000a8499d0 holds 0x2121212121212121 instead of 0x600006a22fa0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -673,7 +673,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15238, "message": "objc[85294]: __weak variable at 0x60800f047240 holds 0x2121212121212121 instead of 0x608002231220. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -690,7 +690,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15501, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21498])", @@ -706,7 +706,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15716, "message": "objc[85294]: __weak variable at 0x60000a256990 holds 0x2121212121212121 instead of 0x600006a22420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -723,7 +723,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 15979, "message": "objc[85294]: __weak variable at 0x6080096475d0 holds 0x2121212121212121 instead of 0x608004e21280. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -740,7 +740,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16242, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -757,7 +757,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16312, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21556])", @@ -773,7 +773,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16527, "message": "Unknown key for integer: _DirtyJetsamMemoryLimit", "process.name": "com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent)", @@ -789,7 +789,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16689, "message": "objc[85294]: __weak variable at 0x60000a85a860 holds 0x2121212121212121 instead of 0x600004a3b9a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -806,7 +806,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 16952, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21581])", @@ -822,7 +822,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 17167, "message": "objc[85294]: __weak variable at 0x608009840580 holds 0x2121212121212121 instead of 0x608004a22940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -839,7 +839,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 17430, "message": "objc[85294]: __weak variable at 0x608009c5b700 holds 0x2121212121212121 instead of 0x608005830020. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -856,7 +856,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 17693, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21586])", @@ -872,7 +872,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 17908, "message": "objc[85294]: __weak variable at 0x60800ee592d0 holds 0x2121212121212121 instead of 0x608005627220. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -889,7 +889,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 18171, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -906,7 +906,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 18241, "message": "objc[85294]: __weak variable at 0x60000c648290 holds 0x2121212121212121 instead of 0x6000050242a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -923,7 +923,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 18504, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21589])", @@ -939,7 +939,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 18719, "message": "objc[85294]: __weak variable at 0x600009840460 holds 0x2121212121212121 instead of 0x60000122e940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -956,7 +956,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 18982, "message": "Unknown key for integer: _DirtyJetsamMemoryLimit", "process.name": "com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent)", @@ -972,7 +972,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 19144, "message": "objc[85294]: __weak variable at 0x60000ee5b730 holds 0x2121212121212121 instead of 0x600007821c20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -989,7 +989,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 19407, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21946])", @@ -1005,7 +1005,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 19622, "message": "objc[85294]: __weak variable at 0x600006a49940 holds 0x2121212121212121 instead of 0x6000078202e0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1022,7 +1022,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 19885, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1039,7 +1039,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 19955, "message": "Invoked notification with id: d63743fb-f17b-4e9e-97d0-88e0e7304682", "process.name": "Slack Helper", @@ -1056,7 +1056,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 20078, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21966])", @@ -1072,7 +1072,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 20293, "message": "objc[85294]: __weak variable at 0x60800f043dc0 holds 0x2121212121212121 instead of 0x6080026228c0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1089,7 +1089,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 20556, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21981])", @@ -1105,7 +1105,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 20771, "message": "objc[85294]: __weak variable at 0x608009a53600 holds 0x2121212121212121 instead of 0x608000629420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1122,7 +1122,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 21034, "message": "objc[85294]: __weak variable at 0x60800f259c30 holds 0x2121212121212121 instead of 0x608004a21c20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1139,7 +1139,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 21297, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1156,7 +1156,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 21367, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22226])", @@ -1172,7 +1172,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 21582, "message": "objc[85294]: __weak variable at 0x60000c647d80 holds 0x2121212121212121 instead of 0x600006e3ee80. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1189,7 +1189,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 21845, "message": "objc[85294]: __weak variable at 0x60800f053a80 holds 0x2121212121212121 instead of 0x608007227ce0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1206,7 +1206,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 22108, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22241])", @@ -1222,7 +1222,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 22323, "message": "objc[85294]: __weak variable at 0x60000a64ce80 holds 0x2121212121212121 instead of 0x600006629940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1239,7 +1239,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 22586, "message": "objc[85294]: __weak variable at 0x60000a843580 holds 0x2121212121212121 instead of 0x600006629540. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1256,7 +1256,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 22849, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22254])", @@ -1272,7 +1272,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 23064, "message": "objc[85294]: __weak variable at 0x60800f45b910 holds 0x2121212121212121 instead of 0x608005822c40. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1289,7 +1289,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 23327, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1306,7 +1306,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 23397, "message": "Unknown key for integer: _DirtyJetsamMemoryLimit", "process.name": "com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent)", @@ -1322,7 +1322,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 23559, "message": "objc[85294]: __weak variable at 0x60000ea5edf0 holds 0x2121212121212121 instead of 0x600003a35a60. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1339,7 +1339,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 23822, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22265])", @@ -1355,7 +1355,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 24037, "message": "Invoked notification with id: 52bf37d9-0c4e-4276-8789-9fc7704bdf5b", "process.name": "Slack Helper", @@ -1372,7 +1372,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 24160, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22292])", @@ -1388,7 +1388,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 24375, "message": "Invoked notification with id: c6c7e356-60a7-4b9e-a9b1-ecc2b8ad09f2", "process.name": "Slack Helper", @@ -1405,7 +1405,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 24498, "message": "objc[85294]: __weak variable at 0x60800f246430 holds 0x2121212121212121 instead of 0x608001c26d00. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1422,7 +1422,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 24761, "message": "objc[85294]: __weak variable at 0x60800c85fd80 holds 0x2121212121212121 instead of 0x608005a3a420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1439,7 +1439,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 25024, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1456,7 +1456,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 25094, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22305])", @@ -1472,7 +1472,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 25309, "message": "objc[85294]: __weak variable at 0x600006452400 holds 0x2121212121212121 instead of 0x60000763bac0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1489,7 +1489,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 25572, "message": "2016-12-13 12:35:56.416 GoogleSoftwareUpdateAgent[22318/0x7fffcc3f93c0] [lvl=2] -[KSAgentApp setupLoggerOutput] Agent settings: ", "process.name": "GoogleSoftwareUpdateAgent", @@ -1506,7 +1506,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 26456, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22324])", @@ -1522,7 +1522,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 26671, "message": "objc[85294]: __weak variable at 0x60800f24d0f0 holds 0x2121212121212121 instead of 0x608007423ee0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1539,7 +1539,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 26934, "message": "Invoked notification with id: aa608788-d049-4d1a-9112-521c71702371", "process.name": "Slack Helper", @@ -1556,7 +1556,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 27057, "message": "Unknown key for integer: _DirtyJetsamMemoryLimit", "process.name": "com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent)", @@ -1572,7 +1572,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 27219, "message": "Invoked notification with id: d75f9ec1-a8fd-41c2-a45e-6df2952f0702", "process.name": "Slack Helper", @@ -1589,7 +1589,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 27342, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22336])", @@ -1605,7 +1605,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 27557, "message": "objc[85294]: __weak variable at 0x60800a2535a0 holds 0x2121212121212121 instead of 0x608003828e20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1622,7 +1622,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 27820, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1639,7 +1639,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 27890, "message": "objc[85294]: __weak variable at 0x60800f241d50 holds 0x2121212121212121 instead of 0x60800562f380. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1656,7 +1656,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 28153, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22348])", @@ -1672,7 +1672,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 28368, "message": "objc[85294]: __weak variable at 0x60000c444450 holds 0x2121212121212121 instead of 0x600007237f00. 
This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1689,7 +1689,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "log", + "input.type": "system-logs", "log.offset": 28631, "message": "objc[85294]: __weak variable at 0x60000c4424a0 holds 0x2121212121212121 instead of 0x600007026520. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", diff --git a/filebeat/module/system/syslog/test/debian-12.export b/filebeat/module/system/syslog/test/debian-12.export new file mode 100644 index 0000000000000000000000000000000000000000..780bd46990ecb0fac4928a097badc42b60e820d0 GIT binary patch literal 2133 zcmdT_U2~f_6y0a-pWv-eiS>mL_2|LHBxT12FW^q{q>(_lv^L5BN!ov3*^ZsePC994 zUv_x_=IZF{-gC~CGFs0RY_%&BRe$0w8oh@VWpfk0E ziCw)8vuYsM?l=(0>*J@07fHIlkx`bG@l6T-{NwoCca`m$wlg&VrGvUGmH8|!R#{$( zFZRCg)E3CqaU{x?%jXT^n^;Wlhkmkc_2g;W(-+8SId?lc#Gu=5(su3WVzZTuudd># zOg_Z|HRlINceOHJ&Si9+XX}+XOt>5>duN`<)@f3T{Yw8FC>BW+i;K%)+g1id**un4 z$--r@uO^l4&E$*Km(_9LaHxRsC)hfT+%>f{yLObd?Tp-+VNe?b{>nZ$1{Ui@DfV%Y zq@S{ARwh|`C^TitG#oZngDG^%K#q;7_7OHd;T(d)VK-(Rzh923cRz;(%iDcnM|%ex zql?+~fiz)_lu|gT8AKH7I%I_TlyIeL3^8XXtSH>XJ5zhd1-+N6dl%lqZraXU^>%kR z4wr}RT-jLYNKj@DTb(*|VnMp-Z3APWBaIA#z?)BSV~d z?DhIn*%KCokYnc(4)vPs2?+v<$XV#ZY3fP-|69$!kmFa=cV>Ii{MiNIDe}Svo}j0~ zznUxxeM;$>T#@YtyEX5<;jta7&uzEyM(b5}>rB9FdbKm2zHb`Sd$oCRI`a^N-+cJH zgdx91807j#9UcgSeQ2;|kdev}Mk%f^sv+S4=e}`{`V+!1pF`#|KR_Yd)9SSxAnq6i zpF4wd)c*uw-f9H@juht>q^X#3sSAYRK6raTPLnI_|B)9=()I>UeO8_8$8_7^a)4FX-Li|K(OLE?tFlYDXlN%R zNhc{Cs{e=l49{=kuQToU<^<+~x~f=FFKtCs+D)Lis=0uaCd~TaWLY-T1ESPyXo@ zYgV21^qHR?+4)NE1(#iS&4b_g>6JHbIlq1FjW>K}_N8|{`24k(zPou;<@nr)Y@%J) zCJ$adBRyXITt0a)Haq|L>FM)Trx%h33(~{f$ch@%Rh^iQyZ-U7IcjD1IU}?8gREAbuYN7PpQBcucYJUK7G>}6Q4j8F2Y&W7$EL5Z zsFf>h+mXJ$qE?y0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+0D<8yP(3>R|Di=4?)$Yr0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk2U1|^j(jp*)B`!Uxd;#- zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C7hMvH<5rrgF)S;J9A_4>m5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z!2Str98pLnh`N8X=_Wvc009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF 
z5FkK+009C72oNB!cY!e{6_No__deQL0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oM;q0%MOZBwLF*T=!{T1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7e?kt6W(Yx!ieQIDLR+bID81PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyFqpuaQwqtcsDq93JOKg(2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF 
z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0Rlr$p!%pn5-RG@ODGWm0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAaJ+~tlgDQwh;Aj-;ewR2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&QAA!}Q3dv@o4xgRb2>}8G2oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk2U}p#Yx!h}s0VvJ^AjLIfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkKcs0wU) zHU0b7QHN?yxd;#-K!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjZxTHwWF3dxD6Z#~Mh1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7e? 
z;Uthhu8_oydN^g5l>h+(1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0Rr!&z@m|bWcjG?WEN`^AV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7e?-~taHmHvA(Q3oIC3;_ZJ2oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7csfx};5?5p`?6HyQU9oYc^0t5&UAV7cs0Rl&gz}RC8$yTEtDf_lh z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0%d_k 
zClwNis8k3LAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZV0Z|u&!vBVEb8#sm0b`ZK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5*Bx&m`fEhIyu{ZWc_W|1 zi#n9E%0_?y0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0tAMtz_wEgNwla#HK$wz z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0Rl&+z@lT)e~&xrk*Q$U1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C7hKInmo%v)VQHRH_?1BIR0t5&UAV7cs0RjXF5IB?qtKUd} zeAIE5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5ZEm+_T)lx5Owz%j|dPTK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7csfe={x zdOmp=HJ(=q5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z4vaumu8>R=^}x((76Jqa5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5;&cS@jnXFgdv>N}Ogx&#OiAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1P%v*mq!+om{AXh%rX)nK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjZxNr4gB;=NgY;DqKGKl``u{HQY1%f}Uxr&R?C+j&h*t!)i6>!!9( zt0`0$Cg;05JEl&r?QHMp=Gqkc|9W3e2uwtHGvXTGO9AexA(NSPoGlLQ`?nqFV7YEreqWUu;vGMU;kS_elgY=`)ZbN_q@re$obK1;FPN` zy=3%-y+4b-sF8D9mNV9Co72?N+BjogvF_^j%a^tHt(ef&-O=0LH=$;7(i-DxvWd<* zdDSVqwyeB2=3R42dR+$Q$xcMB1#x`Ioo~GG?HNChzNnFFTsF_Z^_{<&121!jU6hPiJ2@>%STnw)ez5+p-+d7wi9VHsIcC&wb{O*(<&k zQMt#HFa*TMLM2_iM!$)qo@|ouU{9c^Sef`Ar`c(7}cwC(I z-Sfb#yWh9%l?}0O)P3Y>m{}}crdbWe`Pn7v${HhAx_U0R{k;vNCja6ykHoxVPI^2A#eLZ`< zl*X5zt6X>S#(O3{amBJv#Q3O#t~>j!*M09Tx8DECqunhogx zVeJ!}|7YF*#rUX$uG>c~?uPvk0Q1G`pjKI<>m-+g@aMI9_xYl)-$-Vy76C~G)<`|mDX)p_QR zqkr74G+AkU@>HxlKO69qhiX3mqsKn7J;p~JY~8lj`7;_@nw#dg)?LxRa#>${_C_8# z(s#F9?w@bpdFwS_+^{g_TbSK1yZh5Kktbe{_nr6pv;SMzJSY014w|Pqg^2az^|)ip z-yFNzAEdD(zpes0&;XWqa5ffyfk&~@vZ=FQvV-i-ApWQ`}U z{8C@T?e}-aJnLiqa$o7mSU=ui*WEjA!`G_TT^Hk{4!VAEVX5Y=Gz!Kvi`_%NjBhuKRt8xy0#_v#rUX$ z=a|vlR-84fSl`-kVX>~JXW&=b8F}J!Zt7Vl{`bP?u6Z)%tv)Jqso!&+-9GVs;FAws z^UxQb9Cvf{MIAI(>4T!RIB#ZK{p|To3!3W^p^3{o`eVv7xoDeA&`ak*7AB;JWc^9=mDu^8>HX 
zMI$q($W!hwjg9MfVb(YMUGrYraqi8FBS+MI=4qMRP+zQ@Fmd@+y`3eZ$P?FX-S-Fd?2hWx6H*X??GNAyJ2_N2k$4fu|>*$NR*F4FMHho(8 zoxL*0th=5#dHN~!!IT?&^d33NeHA&{vg5DJzI*F;H(qp4tRHo+Ic60Qy*)!*0MwxoOc^7hNSeR>q<8ZT_BpV8XT)R_FFGId&ecTc{$r?Y4B)arbpqo!tZZF^U3&E)Rd zDf#>!->2e!oAm4h$B+NB*#p;OQ~E({U3htjH<{Nb+DYtZGU<9 ziWRFSu4rFYdXP25>nc7!9{cR1|Jc~KJbtXqEjlK3%WnID9Pzr|e#UF#yFRcnwi7Y- zdfi$tF8@TbqJL>=bgUnr8)HB5d#`R-``Kq>q8GFE%h#>k7YWm@gjDf#yHuF2Jt^XaV>`QmkT z|AdZnU#LH8;OB$IV^W{p^QH+RXE=|$dBU>o_ic`^ZxM4)r)<8umJ-v_?yjozqE+pA z1*;}h&F#Oux38*sS^tuyE0#|zPa1jSgJJuY_gr7sHU6wvaL%c3$~$m9gE2npV4mgcQu<~#t&m;7SU(9U3**1^Z)GGO^8)x7zgkh)R2o% z`k=)G-7>Kch#4JqlxATTlH~_F27xe+(;b~0=hSslX`xUgB^DwfwH0%bEyP7gL?A7t znjnyZKonyT3|kZ}qPjEp{AcLWMS^VgbK&vMob$Zr{@$K{NnL!s{G7Tpbz17c)K96;Qop2bNf0Qojg8yZ}RKpoym8T|0Ul{ z{*}Bhc~A1KU#u9y3M@{H`e+2^zWWuMKyll?C1fA*QIvsv%6&ShQAx|8)S^E~U# zymB08UT0iq{$w0wJaG3RyVy7Le5rf*HV2g(PJ zuYF_dz@Hy1NSutHcJG_@e^oKIyBMvlsP5i7GCs6pWNdqNyr>Qpqm81xI$R&zRjY1$ zb$GZ|ufAB@)5xH!-AlXnE$OOceY#jSnD2G!rT2a-8tc<-Z#UP&*7fP3IhVgV(>ZeP zzh0kmE|Bq?bG3}SoM&acY%R~X($DkC`4Pu<^C_OK^C<2&>$${lz22<$+I_!_yN%_G zOkCc7^4hQ8XL?q$ue3g&=Vs2K`?eLu&b>u_S2Iw4zpadW@zWEB&dnc6+E__{wT;_; zzVt2X>0Q38EU?nf>t#DvemLKAc<#xWx*+4AT|1dK<2!2gU4x?ojT4g<)s407BaQR# z`cxj({CRbIv8S;d7WL}Fmy5l%`oQ4mXfZT6Uep&h8*ZIH`TRu3Czm!n*irfJ)P2)8 zw$7icZ|>`zc;L`SH<~{;+Ry*(e98YKq`$LHrJs`b#k1YGY;`TWVdnCGKR45`e-I!* zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ 
zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ 
z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF 
z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs 
z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U 
zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ 
zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ 
z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK;W--=s*Af002Oczx5y&LIw;NFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ 
z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 
zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3=9SW Dj?7ap literal 0 HcmV?d00001 diff --git a/filebeat/module/system/syslog/test/debian-12.journal-expected.json b/filebeat/module/system/syslog/test/debian-12.journal-expected.json new file mode 100644 index 000000000000..aebf596762cc --- /dev/null +++ b/filebeat/module/system/syslog/test/debian-12.journal-expected.json @@ -0,0 +1,63 @@ +[ + { + "event.dataset": "system.syslog", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "syslog", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 3, + "log.syslog.priority": 6, + "message": "Stopped target getty.target - Login Prompts.", + "process.args": [ + "/sbin/init" + ], + "process.args_count": 1, + "process.command_line": "/sbin/init", + "process.pid": "1", + "related.hosts": [ + "vagrant-debian-12" + ], + "service.type": "system", + "user.group.id": "0", + "user.id": "0" + }, + { + "event.dataset": "system.syslog", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "syslog", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "system-logs", + "log.syslog.facility.code": 0, + "log.syslog.priority": 6, + "message": "Console: switching to colour frame buffer device 160x50", + "process.pid": "", + "related.hosts": [ + "vagrant-debian-12" + ], + "service.type": "system" + }, + { + "event.dataset": "system.syslog", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "syslog", + "host.hostname": "bookworm", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + 
"input.type": "system-logs", + "log.syslog.facility.code": 0, + "log.syslog.priority": 6, + "message": "thermal_sys: Registered thermal governor 'power_allocator'", + "process.pid": "", + "related.hosts": [ + "bookworm" + ], + "service.type": "system" + } +] \ No newline at end of file diff --git a/filebeat/module/system/syslog/test/suse-syslog.log-expected.json b/filebeat/module/system/syslog/test/suse-syslog.log-expected.json index 4090efed2e73..c07c51851def 100644 --- a/filebeat/module/system/syslog/test/suse-syslog.log-expected.json +++ b/filebeat/module/system/syslog/test/suse-syslog.log-expected.json @@ -6,7 +6,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "linux-sqrz", - "input.type": "log", + "input.type": "system-logs", "log.offset": 0, "message": "Stopped target Basic System.", "process.name": "systemd", @@ -23,7 +23,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "linux-sqrz", - "input.type": "log", + "input.type": "system-logs", "log.offset": 88, "message": "Stopped target Paths.", "process.name": "systemd", diff --git a/filebeat/module/system/syslog/test/tz-offset.log-expected.json b/filebeat/module/system/syslog/test/tz-offset.log-expected.json index 905d8cfd95d9..eacba0d40acc 100644 --- a/filebeat/module/system/syslog/test/tz-offset.log-expected.json +++ b/filebeat/module/system/syslog/test/tz-offset.log-expected.json @@ -7,7 +7,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "rmbkmonitor04", - "input.type": "log", + "input.type": "system-logs", "log.file.path": "tz-offset.log", "log.offset": 0, "message": "shutting down for system halt", @@ -26,7 +26,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "rmbkmonitor04", - "input.type": "log", + "input.type": "system-logs", "log.file.path": "tz-offset.log", "log.offset": 89, "message": "constraint_0_power_limit_uw exceeded.", @@ -44,7 +44,7 @@ "event.timezone": "-02:00", "fileset.name": 
"syslog", "host.hostname": "localhost", - "input.type": "log", + "input.type": "system-logs", "log.file.path": "tz-offset.log", "log.offset": 184, "message": "pam_unix(sudo-i:session): session opened for user root by userauth3(uid=0)", diff --git a/filebeat/modules.d/system.yml.disabled b/filebeat/modules.d/system.yml.disabled index 1302c6374da8..809b32de2ed6 100644 --- a/filebeat/modules.d/system.yml.disabled +++ b/filebeat/modules.d/system.yml.disabled @@ -10,6 +10,16 @@ # Filebeat will choose the paths depending on your OS. #var.paths: + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + # Authorization logs auth: enabled: false @@ -17,3 +27,20 @@ # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: + + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # A list of tags to include in events. Including forwarded + # indicates that the events did not originate on this host and + # causes host.name to not be added to events. Include + # preserve_orginal_event causes the pipeline to retain the raw log + # in event.original. Defaults to []. 
+ #var.tags: [] diff --git a/filebeat/tests/system/config/filebeat_modules.yml.j2 b/filebeat/tests/system/config/filebeat_modules.yml.j2 index cde1a3c750d4..7e087af5e8f1 100644 --- a/filebeat/tests/system/config/filebeat_modules.yml.j2 +++ b/filebeat/tests/system/config/filebeat_modules.yml.j2 @@ -9,6 +9,8 @@ filebeat.registry: filebeat.overwrite_pipelines: true +path.logs: {{ beat.working_dir }} + filebeat.config.modules: path: {{ beat.working_dir + '/modules.d/*.yml' }} diff --git a/filebeat/tests/system/test_modules.py b/filebeat/tests/system/test_modules.py index a78bcdecbe9a..db8022b372fc 100644 --- a/filebeat/tests/system/test_modules.py +++ b/filebeat/tests/system/test_modules.py @@ -107,8 +107,11 @@ def load_fileset_test_cases(): if not os.path.isfile(os.path.join(path, fileset, "manifest.yml")): continue - test_files = glob.glob(os.path.join(modules_dir, module, - fileset, "test", os.getenv("TESTING_FILEBEAT_FILEPATTERN", "*.log"))) + test_files_extensions = os.getenv("TESTING_FILEBEAT_FILEPATTERN", "*.log,*.journal").split(",") + test_files = [] + for ext in test_files_extensions: + test_files.extend(glob.glob(os.path.join(modules_dir, module, + fileset, "test", ext))) for test_file in test_files: test_cases.append([module, fileset, test_file]) @@ -166,7 +169,7 @@ def run_on_file(self, module, fileset, test_file, cfgfile): cmd = [ self.filebeat, "-systemTest", - "-e", "-d", "*", "-once", + "-d", "*", "-once", "-c", cfgfile, "-E", "setup.ilm.enabled=false", "-modules={}".format(module), @@ -175,8 +178,6 @@ def run_on_file(self, module, fileset, test_file, cfgfile): module=module, fileset=fileset), "-M", "{module}.{fileset}.var.input=file".format( module=module, fileset=fileset), - "-M", "{module}.{fileset}.var.paths=[{test_file}]".format( - module=module, fileset=fileset, test_file=test_file), "-M", "*.*.input.close_eof=true", ] # allow connecting older versions of Elasticsearch @@ -189,10 +190,25 @@ def run_on_file(self, module, fileset, test_file, 
cfgfile): cmd.append("{module}.{fileset}.var.format=json".format( module=module, fileset=fileset)) + if ".journal" in test_file: + cmd.remove("-once") + cmd.append("-M") + cmd.append("{module}.{fileset}.var.use_journald=true".format( + module=module, fileset=fileset)) + cmd.append("-M") + cmd.append("{module}.{fileset}.input.journald.paths=[{test_file}]".format( + module=module, fileset=fileset, test_file=test_file)) + else: + cmd.append("-M") + cmd.append("{module}.{fileset}.var.paths=[{test_file}]".format( + module=module, fileset=fileset, test_file=test_file)) + output_path = os.path.join(self.working_dir) # Runs inside a with block to ensure file is closed afterwards with open(os.path.join(output_path, "output.log"), "ab") as output: - output.write(bytes(" ".join(cmd) + "\n", "utf-8")) + output.write(bytes("Command run: ", "utf-8")) + output.write(bytes(" ".join(cmd) + "\n\n", "utf-8")) + output.flush() # Use a fixed timezone so results don't vary depending on the environment # Don't use UTC to avoid hiding that non-UTC timezones are not being converted as needed, @@ -201,12 +217,23 @@ def run_on_file(self, module, fileset, test_file, cfgfile): local_env = os.environ.copy() local_env["TZ"] = 'Etc/GMT+2' - subprocess.Popen(cmd, - env=local_env, - stdin=None, - stdout=output, - stderr=subprocess.STDOUT, - bufsize=0).wait() + proc = subprocess.Popen(cmd, + env=local_env, + stdin=None, + stdout=output, + stderr=subprocess.STDOUT, + bufsize=0) + # The journald input (used by some modules like 'system') does not + # support the -once flag, hence we run Filebeat for at most + # 15 seconds, if it does not finish, then kill the process. + # If for any reason the Filebeat process gets stuck, only SIGKILL + # will terminate it. 
We use SIGKILL to avoid leaking any running + # process that could interfere with other tests + try: + proc.wait(15) + except subprocess.TimeoutExpired: + # Send SIGKILL + proc.kill() # List of errors to check in filebeat output logs errors = ["error loading pipeline for fileset"] diff --git a/libbeat/tests/system/beat/beat.py b/libbeat/tests/system/beat/beat.py index bc1126402cd4..74d5ab628162 100644 --- a/libbeat/tests/system/beat/beat.py +++ b/libbeat/tests/system/beat/beat.py @@ -851,7 +851,7 @@ def is_documented(key, docs): is_documented_aliases.append(key) if undocumented_keys: - raise Exception(f"Keys {undocumented_keys} not documented in event {str(evt)}") + raise Exception(f"Keys:\n\n{undocumented_keys}\n\nnot documented in event:\n\n{str(evt)}\n") if is_documented_aliases: raise Exception(f"Keys {is_documented_aliases} documented as aliases!") diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 09a540aa21ed..749f0e0c291f 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -21,7 +21,18 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Input configuration (advanced). Any input configuration option + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # Input configuration (advanced). + # Any input configuration option # can be added under this section. #input: @@ -33,6 +44,23 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. 
#var.paths: + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # A list of tags to include in events. Including 'forwarded' + # indicates that the events did not originate on this host and + # causes host.name to not be added to events. Include + # 'preserve_orginal_event' causes the pipeline to retain the raw log + # in event.original. Defaults to []. + #var.tags: [] + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: From e9617a7d7375ddb536d64197d745b32bd43dceac Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Mon, 14 Oct 2024 12:31:18 -0400 Subject: [PATCH 31/90] Skip flaky tests TestFileWatcher and TestGroup_Go (#41223) --------- Co-authored-by: Denis --- filebeat/input/filestream/fswatch_test.go | 1 + filebeat/input/filestream/internal/task/group_test.go | 1 + 2 files changed, 2 insertions(+) diff --git a/filebeat/input/filestream/fswatch_test.go b/filebeat/input/filestream/fswatch_test.go index 3fab8bfd2bd2..528caec79de3 100644 --- a/filebeat/input/filestream/fswatch_test.go +++ b/filebeat/input/filestream/fswatch_test.go @@ -36,6 +36,7 @@ import ( ) func TestFileWatcher(t *testing.T) { + t.Skip("Flaky test: https://github.com/elastic/beats/issues/41209") dir := t.TempDir() paths := []string{filepath.Join(dir, "*.log")} cfgStr := ` diff --git a/filebeat/input/filestream/internal/task/group_test.go b/filebeat/input/filestream/internal/task/group_test.go index 30b9858a1de3..db50ef3ccabe 100644 --- a/filebeat/input/filestream/internal/task/group_test.go +++ b/filebeat/input/filestream/internal/task/group_test.go @@ -67,6 +67,7 @@ func TestNewGroup(t *testing.T) { } func TestGroup_Go(t *testing.T) { + t.Skip("Flaky tests: 
https://github.com/elastic/beats/issues/41218") t.Run("don't run more than limit goroutines", func(t *testing.T) { done := make(chan struct{}) defer close(done) From a7915d877c5307d71ff82b53ead69f36c6063f5f Mon Sep 17 00:00:00 2001 From: Michael Montgomery Date: Mon, 14 Oct 2024 11:43:36 -0500 Subject: [PATCH 32/90] Adjust Beats container user to be numeric. (#41197) Signed-off-by: Michael Montgomery --- dev-tools/packaging/templates/docker/Dockerfile.tmpl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl index 85904ffe5dd2..d5696e9fa0e1 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl @@ -205,7 +205,7 @@ RUN cd /usr/share/heartbeat/.node \ && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ && chmod ug+rwX -R $NODE_PATH -# Install synthetics as a regular user, installing npm deps as root odesn't work +# Install synthetics as a regular user, installing npm deps as root doesn't work RUN chown -R {{ .user }} $NODE_PATH USER {{ .user }} # If this fails dump the NPM logs @@ -227,7 +227,11 @@ done; \ (exit $exit_code) {{- end }} -USER {{ .user }} +{{- if eq .user "root" }} +USER 0 +{{- else }} +USER 1000 +{{- end }} {{- range $i, $port := .ExposePorts }} EXPOSE {{ $port }} From f69b50127ee0fe029aae727dcce1da6852f4f62d Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Mon, 14 Oct 2024 15:00:42 -0400 Subject: [PATCH 33/90] De-duplicate pipeline steps in system.auth module (#41229) --- filebeat/module/system/auth/ingest/common.yml | 172 +++++++++++++++++ .../module/system/auth/ingest/entrypoint.yml | 3 + filebeat/module/system/auth/ingest/files.yml | 175 +----------------- .../module/system/auth/ingest/journald.yml | 175 +----------------- filebeat/module/system/auth/manifest.yml | 2 + 5 files changed, 181 insertions(+), 346 deletions(-) create mode 100644 
filebeat/module/system/auth/ingest/common.yml diff --git a/filebeat/module/system/auth/ingest/common.yml b/filebeat/module/system/auth/ingest/common.yml new file mode 100644 index 000000000000..75c2a8e46a9b --- /dev/null +++ b/filebeat/module/system/auth/ingest/common.yml @@ -0,0 +1,172 @@ +description: Common steps for Journald and log files from system/auth Filebeat module +processors: + - grok: + description: Grok usernames from PAM messages. + tag: grok-pam-users + field: message + ignore_missing: true + ignore_failure: true + patterns: + - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}? by %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?(?:\(uid=%{NUMBER:_temp.byuid}\))?$' + - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}?$' + - 'by user %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?$' + - '%{BOUNDARY} user %{QUOTE}%{DATA:_temp.user}%{QUOTE}' + pattern_definitions: + QUOTE: "['\"]" + BOUNDARY: "(?- + if (ctx.system.auth.ssh.event == "Accepted") { + ctx.event.type = ["info"]; + ctx.event.category = ["authentication", "session"]; + ctx.event.action = "ssh_login"; + ctx.event.outcome = "success"; + } else if (ctx.system.auth.ssh.event == "Invalid" || ctx.system.auth.ssh.event == "Failed") { + ctx.event.type = ["info"]; + ctx.event.category = ["authentication"]; + ctx.event.action = "ssh_login"; + ctx.event.outcome = "failure"; + } + - append: + field: event.category + value: iam + if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - set: + field: event.outcome + value: success + if: ctx.process?.name != null && (ctx.message == null || !ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - set: + field: event.outcome + value: failure + if: ctx.process?.name != null && (ctx.message != null && ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) 
+ - append: + field: event.type + value: user + if: ctx.process?.name != null && ['useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - append: + field: event.type + value: group + if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod'].contains(ctx.process.name) + - append: + field: event.type + value: creation + if: ctx.process?.name != null && ['useradd', 'groupadd'].contains(ctx.process.name) + - append: + field: event.type + value: deletion + if: ctx.process?.name != null && ['userdel', 'groupdel'].contains(ctx.process.name) + - append: + field: event.type + value: change + if: ctx.process?.name != null && ['usermod', 'groupmod'].contains(ctx.process.name) + - append: + field: related.user + value: "{{{ user.name }}}" + allow_duplicates: false + if: ctx.user?.name != null && ctx.user?.name != '' + - append: + field: related.user + value: "{{{ user.effective.name }}}" + allow_duplicates: false + if: ctx.user?.effective?.name != null && ctx.user?.effective?.name != '' + - append: + field: related.ip + value: "{{{ source.ip }}}" + allow_duplicates: false + if: ctx.source?.ip != null && ctx.source?.ip != '' + - append: + field: related.hosts + value: "{{{ host.hostname }}}" + allow_duplicates: false + if: ctx.host?.hostname != null && ctx.host?.hostname != '' + - set: + field: ecs.version + value: 8.0.0 + - remove: + field: event.original + if: "ctx?.tags == null || !(ctx.tags.contains('preserve_original_event'))" + ignore_failure: true + ignore_missing: true diff --git a/filebeat/module/system/auth/ingest/entrypoint.yml b/filebeat/module/system/auth/ingest/entrypoint.yml index 93869fd1486f..7da5fc4a5d40 100644 --- a/filebeat/module/system/auth/ingest/entrypoint.yml +++ b/filebeat/module/system/auth/ingest/entrypoint.yml @@ -1,5 +1,8 @@ description: Entrypoint Pipeline for system/auth Filebeat module processors: + - set: + field: event.ingested + copy_from: _ingest.timestamp - script: source: | if(ctx?.journald != null){ diff --git 
a/filebeat/module/system/auth/ingest/files.yml b/filebeat/module/system/auth/ingest/files.yml index 39611f484a82..fbeebc12b7e2 100644 --- a/filebeat/module/system/auth/ingest/files.yml +++ b/filebeat/module/system/auth/ingest/files.yml @@ -1,9 +1,6 @@ --- description: Pipeline for parsing system authorization and secure logs. processors: - - set: - field: event.ingested - copy_from: _ingest.timestamp - rename: if: ctx.event?.original == null field: message @@ -28,76 +25,8 @@ processors: target_field: message - remove: field: _temp - - grok: - description: Grok usernames from PAM messages. - tag: grok-pam-users - field: message - ignore_missing: true - ignore_failure: true - patterns: - - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}? by %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?(?:\(uid=%{NUMBER:_temp.byuid}\))?$' - - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}?$' - - 'by user %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?$' - - '%{BOUNDARY} user %{QUOTE}%{DATA:_temp.user}%{QUOTE}' - pattern_definitions: - QUOTE: "['\"]" - BOUNDARY: "(?}" - date: if: ctx.event?.timezone == null field: system.auth.timestamp @@ -125,106 +54,6 @@ processors: value: '{{{ _ingest.on_failure_message }}}' - remove: field: system.auth.timestamp - - geoip: - field: source.ip - target_field: source.geo - ignore_missing: true - - geoip: - database_file: GeoLite2-ASN.mmdb - field: source.ip - target_field: source.as - properties: - - asn - - organization_name - ignore_missing: true - - rename: - field: source.as.asn - target_field: source.as.number - ignore_missing: true - - rename: - field: source.as.organization_name - target_field: source.as.organization.name - ignore_missing: true - - set: - field: event.kind - value: event - - script: - description: Add event.category/action/output to SSH events. 
- tag: script-categorize-ssh-event - if: ctx.system?.auth?.ssh?.event != null - lang: painless - source: >- - if (ctx.system.auth.ssh.event == "Accepted") { - ctx.event.type = ["info"]; - ctx.event.category = ["authentication", "session"]; - ctx.event.action = "ssh_login"; - ctx.event.outcome = "success"; - } else if (ctx.system.auth.ssh.event == "Invalid" || ctx.system.auth.ssh.event == "Failed") { - ctx.event.type = ["info"]; - ctx.event.category = ["authentication"]; - ctx.event.action = "ssh_login"; - ctx.event.outcome = "failure"; - } - - append: - field: event.category - value: iam - if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) - - set: - field: event.outcome - value: success - if: ctx.process?.name != null && (ctx.message == null || !ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) - - set: - field: event.outcome - value: failure - if: ctx.process?.name != null && (ctx.message != null && ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) - - append: - field: event.type - value: user - if: ctx.process?.name != null && ['useradd', 'userdel', 'usermod'].contains(ctx.process.name) - - append: - field: event.type - value: group - if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod'].contains(ctx.process.name) - - append: - field: event.type - value: creation - if: ctx.process?.name != null && ['useradd', 'groupadd'].contains(ctx.process.name) - - append: - field: event.type - value: deletion - if: ctx.process?.name != null && ['userdel', 'groupdel'].contains(ctx.process.name) - - append: - field: event.type - value: change - if: ctx.process?.name != null && ['usermod', 'groupmod'].contains(ctx.process.name) - - append: - field: related.user - value: "{{{ user.name }}}" - allow_duplicates: false - if: 
ctx.user?.name != null && ctx.user?.name != '' - - append: - field: related.user - value: "{{{ user.effective.name }}}" - allow_duplicates: false - if: ctx.user?.effective?.name != null && ctx.user?.effective?.name != '' - - append: - field: related.ip - value: "{{{ source.ip }}}" - allow_duplicates: false - if: ctx.source?.ip != null && ctx.source?.ip != '' - - append: - field: related.hosts - value: "{{{ host.hostname }}}" - allow_duplicates: false - if: ctx.host?.hostname != null && ctx.host?.hostname != '' - - set: - field: ecs.version - value: 8.0.0 - - remove: - field: event.original - if: "ctx?.tags == null || !(ctx.tags.contains('preserve_original_event'))" - ignore_failure: true - ignore_missing: true on_failure: - set: field: error.message diff --git a/filebeat/module/system/auth/ingest/journald.yml b/filebeat/module/system/auth/ingest/journald.yml index 10e7ae96054e..aee3f5263ede 100644 --- a/filebeat/module/system/auth/ingest/journald.yml +++ b/filebeat/module/system/auth/ingest/journald.yml @@ -1,8 +1,5 @@ description: Journald Pipeline for system/auth Filebeat module processors: - - set: - field: event.ingested - copy_from: _ingest.timestamp - rename: field: "journald.process.name" target_field: process.name @@ -16,176 +13,8 @@ processors: - rename: field: _temp.message target_field: message - - grok: - description: Grok usernames from PAM messages. - tag: grok-pam-users - field: message - ignore_missing: true - ignore_failure: true - patterns: - - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}? 
by %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?(?:\(uid=%{NUMBER:_temp.byuid}\))?$' - - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}?$' - - 'by user %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?$' - - '%{BOUNDARY} user %{QUOTE}%{DATA:_temp.user}%{QUOTE}' - pattern_definitions: - QUOTE: "['\"]" - BOUNDARY: "(?- - if (ctx.system.auth.ssh.event == "Accepted") { - ctx.event.type = ["info"]; - ctx.event.category = ["authentication", "session"]; - ctx.event.action = "ssh_login"; - ctx.event.outcome = "success"; - } else if (ctx.system.auth.ssh.event == "Invalid" || ctx.system.auth.ssh.event == "Failed") { - ctx.event.type = ["info"]; - ctx.event.category = ["authentication"]; - ctx.event.action = "ssh_login"; - ctx.event.outcome = "failure"; - } - - append: - field: event.category - value: iam - if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) - - set: - field: event.outcome - value: success - if: ctx.process?.name != null && (ctx.message == null || !ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) - - set: - field: event.outcome - value: failure - if: ctx.process?.name != null && (ctx.message != null && ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) - - append: - field: event.type - value: user - if: ctx.process?.name != null && ['useradd', 'userdel', 'usermod'].contains(ctx.process.name) - - append: - field: event.type - value: group - if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod'].contains(ctx.process.name) - - append: - field: event.type - value: creation - if: ctx.process?.name != null && ['useradd', 'groupadd'].contains(ctx.process.name) - - append: - field: event.type - value: deletion - if: ctx.process?.name != null && ['userdel', 'groupdel'].contains(ctx.process.name) - - append: - field: event.type - value: 
change - if: ctx.process?.name != null && ['usermod', 'groupmod'].contains(ctx.process.name) - - append: - field: related.user - value: "{{{ user.name }}}" - allow_duplicates: false - if: ctx.user?.name != null && ctx.user?.name != '' - - append: - field: related.user - value: "{{{ user.effective.name }}}" - allow_duplicates: false - if: ctx.user?.effective?.name != null && ctx.user?.effective?.name != '' - - append: - field: related.ip - value: "{{{ source.ip }}}" - allow_duplicates: false - if: ctx.source?.ip != null && ctx.source?.ip != '' - - append: - field: related.hosts - value: "{{{ host.hostname }}}" - allow_duplicates: false - if: ctx.host?.hostname != null && ctx.host?.hostname != '' - - set: - field: ecs.version - value: 8.0.0 - - remove: - field: event.original - if: "ctx?.tags == null || !(ctx.tags.contains('preserve_original_event'))" - ignore_failure: true - ignore_missing: true + - pipeline: + name: "{< IngestPipeline "common" >}" - remove: description: Remove the extra fields added by the Journald input ignore_missing: true diff --git a/filebeat/module/system/auth/manifest.yml b/filebeat/module/system/auth/manifest.yml index 4b99d6407b76..fefc51a88a45 100644 --- a/filebeat/module/system/auth/manifest.yml +++ b/filebeat/module/system/auth/manifest.yml @@ -22,4 +22,6 @@ ingest_pipeline: - ingest/files.yml - ingest/journald.yml - ingest/grok-auth-messages.yml + - ingest/common.yml + input: config/auth.yml From 59c989debe2477bb7b944042bf48d0a3fe1aac60 Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Tue, 15 Oct 2024 18:33:10 +1030 Subject: [PATCH 34/90] x-pack/filebeat/input/internal/private: add support for marked redaction (#41212) The initial implementation only supported the removal of redacted values. This adds the option to replace redacted values with a redaction place holder, defined by the type of the value. 
--- CHANGELOG-developer.next.asciidoc | 1 + .../input/internal/private/private.go | 143 +++++++++- .../input/internal/private/private_test.go | 251 +++++++++++++++++- 3 files changed, 378 insertions(+), 17 deletions(-) diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 610078d225ea..2c1c772c1d81 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -208,6 +208,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Simplified GCS input state checkpoint calculation logic. {issue}40878[40878] {pull}40937[40937] - Simplified Azure Blob Storage input state checkpoint calculation logic. {issue}40674[40674] {pull}40936[40936] - Add field redaction package. {pull}40997[40997] +- Add support for marked redaction to x-pack/filebeat/input/internal/private {pull}41212[41212] ==== Deprecated diff --git a/x-pack/filebeat/input/internal/private/private.go b/x-pack/filebeat/input/internal/private/private.go index e47b6521e477..c0b2e311fded 100644 --- a/x-pack/filebeat/input/internal/private/private.go +++ b/x-pack/filebeat/input/internal/private/private.go @@ -35,7 +35,11 @@ var privateKey = reflect.ValueOf("private") // `private:""`, the fields with the tag will be marked as private. Otherwise // the comma-separated list of names with be used. The list may refer to its // own field. 
-func Redact[T any](val T, tag string, global []string) (redacted T, err error) { +func Redact[T any](val T, tag string, global []string, replace ...Replacer) (redacted T, err error) { + reps, err := compileReplacers(replace) + if err != nil { + return redacted, err + } defer func() { switch r := recover().(type) { case nil: @@ -54,13 +58,65 @@ func Redact[T any](val T, tag string, global []string) (redacted T, err error) { rv := reflect.ValueOf(val) switch rv.Kind() { case reflect.Map, reflect.Pointer, reflect.Struct: - return redact(rv, tag, slices.Clone(global), 0, make(map[any]int)).Interface().(T), nil + return redact(rv, reps, tag, slices.Clone(global), 0, make(map[any]int)).Interface().(T), nil default: return val, nil } } -func redact(v reflect.Value, tag string, global []string, depth int, seen map[any]int) reflect.Value { +// Replacer is a function that will return a redaction replacement +// for the provided type. It must be a func(T) T. +type Replacer any + +// NewStringReplacer returns a string Replacer that returns s. +func NewStringReplacer(s string) Replacer { + return func(string) string { + return s + } +} + +// NewBytesReplacer returns a []byte Replacer that returns the bytes +// representation of s. 
+func NewBytesReplacer(s string) Replacer { + return func([]byte) []byte { + return []byte(s) + } +} + +type replacers map[reflect.Type]func(reflect.Value) reflect.Value + +func compileReplacers(replace []Replacer) (replacers, error) { + reps := make(replacers) + for _, r := range replace { + rv := reflect.ValueOf(r) + rt := rv.Type() + if rt.Kind() != reflect.Func { + return nil, fmt.Errorf("replacer is not a function: %T", r) + } + if n := rt.NumIn(); n != 1 { + return nil, fmt.Errorf("incorrect number of arguments for replacer: %d != 1", n) + } + if n := rt.NumOut(); n != 1 { + return nil, fmt.Errorf("incorrect number of return values from replacer: %d != 1", n) + } + in, out := rt.In(0), rt.Out(0) + if in != out { + return nil, fmt.Errorf("replacer does not preserve type: fn(%s) %s", in, out) + } + if _, exists := reps[in]; exists { + return nil, fmt.Errorf("multiple replacers for %s", in) + } + reps[in] = func(v reflect.Value) reflect.Value { + return rv.Call([]reflect.Value{v})[0] + } + } + if len(reps) == 0 { + reps = nil + } + return reps, nil +} + +func redact(v reflect.Value, reps replacers, tag string, global []string, depth int, seen map[any]int) reflect.Value { switch v.Kind() { case reflect.Pointer: if v.IsNil() { @@ -74,19 +130,19 @@ func redact(v reflect.Value, tag string, global []string, depth int, seen map[an seen[ident] = depth defer delete(seen, ident) } - return redact(v.Elem(), tag, global, depth+1, seen).Addr() + return redact(v.Elem(), reps, tag, global, depth+1, seen).Addr() case reflect.Interface: if v.IsNil() { return v } - return redact(v.Elem(), tag, global, depth+1, seen) + return redact(v.Elem(), reps, tag, global, depth+1, seen) case reflect.Array: if v.Len() == 0 { return v } r := reflect.New(v.Type()).Elem() for i := 0; i < v.Len(); i++ { - r.Index(i).Set(redact(v.Index(i), tag, global, depth+1, seen)) + r.Index(i).Set(redact(v.Index(i), reps, tag, global, depth+1, seen)) } return r case reflect.Slice: @@ -109,7 +165,7 @@ func 
redact(v reflect.Value, tag string, global []string, depth int, seen map[an } r := reflect.MakeSlice(v.Type(), v.Len(), v.Cap()) for i := 0; i < v.Len(); i++ { - r.Index(i).Set(redact(v.Index(i), tag, global, depth+1, seen)) + r.Index(i).Set(redact(v.Index(i), reps, tag, global, depth+1, seen)) } return r case reflect.Map: @@ -145,9 +201,13 @@ func redact(v reflect.Value, tag string, global []string, depth int, seen map[an for it.Next() { name := it.Key().String() if slices.Contains(private, name) { + v := replaceNestedWithin(it.Value(), reps) + if v.IsValid() { + r.SetMapIndex(it.Key(), v) + } continue } - r.SetMapIndex(it.Key(), redact(it.Value(), tag, nextPath(name, global), depth+1, seen)) + r.SetMapIndex(it.Key(), redact(it.Value(), reps, tag, nextPath(name, global), depth+1, seen)) } return r case reflect.Struct: @@ -219,10 +279,14 @@ func redact(v reflect.Value, tag string, global []string, depth int, seen map[an continue } if slices.Contains(private, names[i]) { + v := replaceNestedWithin(f, reps) + if v.IsValid() { + r.Field(i).Set(v) + } continue } if r.Field(i).CanSet() { - r.Field(i).Set(redact(f, tag, nextPath(names[i], global), depth+1, seen)) + r.Field(i).Set(redact(f, reps, tag, nextPath(names[i], global), depth+1, seen)) } } return r @@ -230,6 +294,67 @@ func redact(v reflect.Value, tag string, global []string, depth int, seen map[an return v } +// replaceNestedWithin replaces deeply nested values in pointer, interface and +// array/slice chains. If a replacement is not made an invalid reflect.Value +// is returned. If elements are not replaced by a replacer, it is set to the +// zero value for the type. +func replaceNestedWithin(v reflect.Value, reps replacers) reflect.Value { + if len(reps) == 0 || !v.IsValid() { + // No replacer, or an invalid value, so fall back to removal. 
+ return reflect.Value{} + } + if rep, ok := reps[v.Type()]; ok { + return rep(v) + } + switch v.Kind() { + case reflect.Pointer: + r := replaceNestedWithin(v.Elem(), reps) + if !r.IsValid() { + return r + } + return r.Addr() + case reflect.Interface: + r := replaceNestedWithin(v.Elem(), reps) + if !r.IsValid() { + return r + } + i := reflect.New(v.Type()).Elem() + i.Set(r) + return i + case reflect.Array: + a := reflect.New(v.Type()).Elem() + wasSet := false + for i := 0; i < v.Len(); i++ { + r := replaceNestedWithin(v.Index(i), reps) + if r.IsValid() { + wasSet = true + a.Index(i).Set(r) + } + } + if !wasSet { + return reflect.Value{} + } + return a + case reflect.Slice: + s := reflect.MakeSlice(v.Type(), v.Len(), v.Cap()) + wasSet := false + for i := 0; i < v.Len(); i++ { + r := replaceNestedWithin(v.Index(i), reps) + if r.IsValid() { + wasSet = true + s.Index(i).Set(r) + } + } + if !wasSet { + return reflect.Value{} + } + return s + default: + // Could not catch, fall back to removal. 
+ return reflect.Value{} + } +} + func nextStep(global []string) (private []string) { if len(global) == 0 { return nil diff --git a/x-pack/filebeat/input/internal/private/private_test.go b/x-pack/filebeat/input/internal/private/private_test.go index 774e35f3d532..aa813ada5d1b 100644 --- a/x-pack/filebeat/input/internal/private/private_test.go +++ b/x-pack/filebeat/input/internal/private/private_test.go @@ -7,20 +7,23 @@ package private import ( "bytes" "encoding/json" + "errors" "net/url" "reflect" + "strings" "testing" "github.com/google/go-cmp/cmp" ) type redactTest struct { - name string - in any - tag string - global []string - want any - wantErr error + name string + in any + tag string + global []string + replacers []Replacer + want any + wantErr error } var redactTests = []redactTest{ @@ -36,6 +39,34 @@ var redactTests = []redactTest{ "not_secret": "2", }, }, + { + name: "map_string_replacer", + in: map[string]any{ + "private": "secret", + "secret": "this is a secret", + "not_secret": "this is not", + }, + replacers: []Replacer{NewStringReplacer("REDACTED")}, + want: map[string]any{ + "private": "secret", + "secret": "REDACTED", + "not_secret": "this is not", + }, + }, + { + name: "map_string_custom_replacer", + in: map[string]any{ + "private": "secret", + "secret": "this is a secret", + "not_secret": "this is not", + }, + replacers: []Replacer{func(s string) string { return strings.Repeat("*", len(s)) }}, + want: map[string]any{ + "private": "secret", + "secret": "****************", // Same length as original. 
+ "not_secret": "this is not", + }, + }, { name: "map_string_inner", in: map[string]any{ @@ -80,6 +111,78 @@ var redactTests = []redactTest{ }, }}, }, + { + name: "map_string_inner_next_inner_global_slices", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "secret": []string{"1"}, + "not_secret": []string{"2"}, + }, + }}, + global: []string{"inner.next_inner.secret"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "not_secret": []string{"2"}, + }, + }}, + }, + { + name: "map_string_inner_next_inner_global_nested_slices", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "secret": [][]string{{"1"}}, + "not_secret": [][]string{{"2"}}, + }, + }}, + global: []string{"inner.next_inner.secret"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "not_secret": [][]string{{"2"}}, + }, + }}, + }, + { + name: "map_string_inner_next_inner_global_slices_replacer", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "secret": []string{"1"}, + "not_secret": []string{"2"}, + }, + }}, + replacers: []Replacer{NewStringReplacer("REDACTED")}, + global: []string{"inner.next_inner.secret"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "not_secret": []string{"2"}, + "secret": []string{"REDACTED"}, + }, + }}, + }, + { + name: "map_string_inner_next_inner_global_nested_slices_replacer", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "secret": [][]string{{"1"}}, + "not_secret": [][]string{{"2"}}, + }, + }}, + replacers: []Replacer{NewStringReplacer("REDACTED")}, + global: []string{"inner.next_inner.secret"}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": map[string]any{ + "secret": [][]string{{"REDACTED"}}, + "not_secret": [][]string{{"2"}}, + }, + }}, + }, { name: "map_string_inner_next_inner_params_global", in: map[string]any{ 
@@ -193,6 +296,49 @@ var redactTests = []redactTest{ }, }}, }, + { + name: "map_string_inner_next_inner_params_global_internal_slice_precise_replacer", + in: map[string]any{ + "inner": map[string]any{ + "next_inner": []map[string]any{ + { + "headers": url.Values{ + "secret": []string{"1"}, + "not_secret": []string{"2"}, + }, + "not_secret": "2", + }, + { + "headers": url.Values{ + "secret": []string{"3"}, + "not_secret": []string{"4"}, + }, + "not_secret": "4", + }, + }, + }}, + global: []string{"inner.next_inner.headers.secret"}, + replacers: []Replacer{NewStringReplacer("REDACTED")}, + want: map[string]any{ + "inner": map[string]any{ + "next_inner": []map[string]any{ + { + "headers": url.Values{ + "not_secret": []string{"2"}, + "secret": []string{"REDACTED"}, + }, + "not_secret": "2", + }, + { + "headers": url.Values{ + "not_secret": []string{"4"}, + "secret": []string{"REDACTED"}, + }, + "not_secret": "4", + }, + }, + }}, + }, { name: "map_slice", in: map[string]any{ @@ -239,6 +385,50 @@ var redactTests = []redactTest{ }, } }(), + func() redactTest { + type s struct { + Private string + Secret string + NotSecret string + } + return redactTest{ + name: "struct_string_replacer", + in: s{ + Private: "Secret", + Secret: "this is a secret", + NotSecret: "this is not", + }, + replacers: []Replacer{NewStringReplacer("REDACTED")}, + tag: "", + want: s{ + Private: "Secret", + Secret: "REDACTED", + NotSecret: "this is not", + }, + } + }(), + func() redactTest { + type s struct { + Private string + Secret string + NotSecret string + } + return redactTest{ + name: "struct_string_replacer", + in: s{ + Private: "Secret", + Secret: "this is a secret", + NotSecret: "this is not", + }, + replacers: []Replacer{func(s string) string { return strings.Repeat("*", len(s)) }}, + tag: "", + want: s{ + Private: "Secret", + Secret: "****************", + NotSecret: "this is not", + }, + } + }(), func() redactTest { type s struct { Private []string @@ -399,6 +589,37 @@ var redactTests = 
[]redactTest{ wantErr: cycle{reflect.TypeOf(&s{})}, } }(), + { + name: "invalid_replacer_wrong_type", + in: struct{}{}, + replacers: []Replacer{func(s string) int { return len(s) }}, + want: struct{}{}, + wantErr: errors.New("replacer does not preserve type: fn(string) int"), + }, + { + name: "invalid_replacer_wrong_argnum", + in: struct{}{}, + replacers: []Replacer{func(a, b string) string { return a + b }}, + want: struct{}{}, + wantErr: errors.New("incorrect number of arguments for replacer: 2 != 1"), + }, + { + name: "invalid_replacer_wrong_retnum", + in: struct{}{}, + replacers: []Replacer{func(s string) (a, b string) { return s, s }}, + want: struct{}{}, + wantErr: errors.New("incorrect number of return values from replacer: 2 != 1"), + }, + { + name: "invalid_replacer_collision", + in: struct{}{}, + replacers: []Replacer{ + func(s string) string { return s }, + func(s string) string { return s }, + }, + want: struct{}{}, + wantErr: errors.New("multiple replacers for string"), + }, } func TestRedact(t *testing.T) { @@ -415,10 +636,13 @@ func TestRedact(t *testing.T) { t.Fatalf("failed to get before state: %v", err) } } - got, err := Redact(test.in, test.tag, test.global) - if err != test.wantErr { + got, err := Redact(test.in, test.tag, test.global, test.replacers...) 
+ if !sameError(err, test.wantErr) { t.Fatalf("unexpected error from Redact: %v", err) } + if err != nil { + return + } if !isCycle { after, err := json.Marshal(test.in) if err != nil { @@ -434,3 +658,14 @@ func TestRedact(t *testing.T) { }) } } + +func sameError(a, b error) bool { + switch { + case a == nil && b == nil: + return true + case a == nil, b == nil: + return false + default: + return a.Error() == b.Error() + } +} From 66dacd95edef63bd5516467323bd6a0352b66168 Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Tue, 15 Oct 2024 08:05:46 -0300 Subject: [PATCH 35/90] Re-enable TestGroup_Go and fix flaky behavior (#41230) --- .../filestream/internal/task/group_test.go | 53 ++++++++++++------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/filebeat/input/filestream/internal/task/group_test.go b/filebeat/input/filestream/internal/task/group_test.go index db50ef3ccabe..6ba0ac2cf1db 100644 --- a/filebeat/input/filestream/internal/task/group_test.go +++ b/filebeat/input/filestream/internal/task/group_test.go @@ -36,15 +36,21 @@ type noopLogger struct{} func (n noopLogger) Errorf(string, ...interface{}) {} -type testLogger strings.Builder +type testLogger struct { + mu sync.Mutex + b strings.Builder +} func (tl *testLogger) Errorf(format string, args ...interface{}) { - sb := (*strings.Builder)(tl) - sb.WriteString(fmt.Sprintf(format, args...)) - sb.WriteString("\n") + tl.mu.Lock() + defer tl.mu.Unlock() + tl.b.WriteString(fmt.Sprintf(format, args...)) + tl.b.WriteString("\n") } func (tl *testLogger) String() string { - return (*strings.Builder)(tl).String() + tl.mu.Lock() + defer tl.mu.Unlock() + return tl.b.String() } func TestNewGroup(t *testing.T) { @@ -67,7 +73,6 @@ func TestNewGroup(t *testing.T) { } func TestGroup_Go(t *testing.T) { - t.Skip("Flaky tests: https://github.com/elastic/beats/issues/41218") t.Run("don't run more than limit goroutines", func(t *testing.T) { done := make(chan struct{}) defer close(done) @@ -227,14 +232,12 
@@ func TestGroup_Go(t *testing.T) { t.Run("all workloads return an error", func(t *testing.T) { logger := &testLogger{} - runCunt := atomic.Uint64{} - wg := sync.WaitGroup{} + var count atomic.Uint64 wantErr := errors.New("a error") workload := func(i int) func(context.Context) error { return func(_ context.Context) error { - defer runCunt.Add(1) - defer wg.Done() + defer count.Add(1) return fmt.Errorf("[%d]: %w", i, wantErr) } } @@ -242,23 +245,24 @@ func TestGroup_Go(t *testing.T) { want := uint64(2) g := NewGroup(want, time.Second, logger, "errorPrefix") - wg.Add(1) err := g.Go(workload(1)) require.NoError(t, err) - wg.Wait() - wg.Add(1) err = g.Go(workload(2)) require.NoError(t, err) - wg.Wait() - err = g.Stop() + assert.Eventually(t, func() bool { + return count.Load() == want && logger.String() != "" + }, 100*time.Millisecond, time.Millisecond) + err = g.Stop() require.NoError(t, err) + logs := logger.String() assert.Contains(t, logs, wantErr.Error()) assert.Contains(t, logs, "[2]") assert.Contains(t, logs, "[1]") + }) t.Run("some workloads return an error", func(t *testing.T) { @@ -268,17 +272,26 @@ func TestGroup_Go(t *testing.T) { g := NewGroup(want, time.Second, logger, "") - err := g.Go(func(_ context.Context) error { return nil }) + var count atomic.Uint64 + err := g.Go(func(_ context.Context) error { + count.Add(1) + return nil + }) require.NoError(t, err) - err = g.Go(func(_ context.Context) error { return wantErr }) + err = g.Go(func(_ context.Context) error { + count.Add(1) + return wantErr + }) require.NoError(t, err) - time.Sleep(time.Millisecond) + assert.Eventually(t, func() bool { + return count.Load() == want && logger.String() != "" + }, 100*time.Millisecond, time.Millisecond, "not all workloads finished") - err = g.Stop() + assert.Contains(t, logger.String(), wantErr.Error()) + err = g.Stop() assert.NoError(t, err) - assert.Contains(t, logger.String(), wantErr.Error()) }) t.Run("workload returns no error", func(t *testing.T) { From 
3492089397644e8395f5132f63f7ee60832b5d5a Mon Sep 17 00:00:00 2001 From: Denis Date: Tue, 15 Oct 2024 15:40:30 +0200 Subject: [PATCH 36/90] Switch K8s tests to Wolfi by default and make it configurable (#41222) By default we test Wolfi-based images but there is now an environment variable `IMAGE_MODIFIER` that can be used for specifying different image prefixes when calling the make target. Also, now the test verifies the actual container readiness and deletes resources after itself. --- deploy/kubernetes/Makefile | 15 +++++++++++---- deploy/kubernetes/auditbeat-kubernetes.yaml | 2 +- .../kubernetes/auditbeat/auditbeat-daemonset.yaml | 2 +- deploy/kubernetes/filebeat-kubernetes.yaml | 2 +- .../kubernetes/filebeat/filebeat-daemonset.yaml | 2 +- deploy/kubernetes/heartbeat-kubernetes.yaml | 2 +- .../heartbeat/heartbeat-deployment.yaml | 2 +- deploy/kubernetes/metricbeat-kubernetes.yaml | 2 +- .../metricbeat/metricbeat-daemonset.yaml | 2 +- 9 files changed, 19 insertions(+), 12 deletions(-) diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index 166c83bf515a..d05b6a886832 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -1,4 +1,5 @@ ALL=filebeat metricbeat auditbeat heartbeat +IMAGE_MODIFIER?="-wolfi" BEAT_VERSION=$(shell head -n 1 ../../libbeat/docs/version.asciidoc | cut -c 17- ) .PHONY: all $(ALL) @@ -6,21 +7,27 @@ BEAT_VERSION=$(shell head -n 1 ../../libbeat/docs/version.asciidoc | cut -c 17- all: $(ALL) test: all - for FILE in $(shell ls *-kubernetes.yaml); do \ - BEAT=$$(echo $$FILE | cut -d \- -f 1); \ + @for BEAT in $(ALL); do \ + echo; \ + echo "$$BEAT"; \ + FILE="$$BEAT-kubernetes.yaml"; \ kubectl create -f $$FILE; \ + echo "Testing $$BEAT container for readiness..."; \ + kubectl wait pods -n kube-system -l k8s-app=$$BEAT --for=condition=Ready --timeout=90s; \ + echo "Deleting $$BEAT..."; \ + kubectl delete -f $$FILE; \ done clean: @for f in $(ALL); do rm -f "$$f-kubernetes.yaml"; done $(ALL): - @echo "Generating 
$@-kubernetes.yaml" + @echo "Generating $@-kubernetes.yaml for version ${BEAT_VERSION} and image modifier '${IMAGE_MODIFIER}'" @rm -f $@-kubernetes.yaml @for f in service-account role role-binding configmap deployment daemonset ; do \ if [ -f "$@/$@-$$f.yaml" ]; then \ echo "file: $@/$@-$$f.yaml"; \ - sed "s/%VERSION%/${BEAT_VERSION}/g" $@/$@-$$f.yaml >> $@-kubernetes.yaml; \ + cat $@/$@-$$f.yaml | sed "s/%VERSION%/${BEAT_VERSION}/g" | sed "s/%IMAGE_MODIFIER%/${IMAGE_MODIFIER}/g" >> $@-kubernetes.yaml; \ echo --- >> $@-kubernetes.yaml; \ fi \ done diff --git a/deploy/kubernetes/auditbeat-kubernetes.yaml b/deploy/kubernetes/auditbeat-kubernetes.yaml index db3588ad9605..23c940ad4e0a 100644 --- a/deploy/kubernetes/auditbeat-kubernetes.yaml +++ b/deploy/kubernetes/auditbeat-kubernetes.yaml @@ -209,7 +209,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: auditbeat - image: docker.elastic.co/beats/auditbeat:9.0.0 + image: docker.elastic.co/beats/auditbeat-wolfi:9.0.0 args: [ "-c", "/etc/auditbeat.yml", "-e", diff --git a/deploy/kubernetes/auditbeat/auditbeat-daemonset.yaml b/deploy/kubernetes/auditbeat/auditbeat-daemonset.yaml index 39eaf726eefc..39a2c35c3f1b 100644 --- a/deploy/kubernetes/auditbeat/auditbeat-daemonset.yaml +++ b/deploy/kubernetes/auditbeat/auditbeat-daemonset.yaml @@ -22,7 +22,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: auditbeat - image: docker.elastic.co/beats/auditbeat:%VERSION% + image: docker.elastic.co/beats/auditbeat%IMAGE_MODIFIER%:%VERSION% args: [ "-c", "/etc/auditbeat.yml", "-e", diff --git a/deploy/kubernetes/filebeat-kubernetes.yaml b/deploy/kubernetes/filebeat-kubernetes.yaml index e272abe98930..f028322c1aca 100644 --- a/deploy/kubernetes/filebeat-kubernetes.yaml +++ b/deploy/kubernetes/filebeat-kubernetes.yaml @@ -183,7 +183,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: filebeat - image: docker.elastic.co/beats/filebeat:9.0.0 + image: docker.elastic.co/beats/filebeat-wolfi:9.0.0 
args: [ "-c", "/etc/filebeat.yml", "-e", diff --git a/deploy/kubernetes/filebeat/filebeat-daemonset.yaml b/deploy/kubernetes/filebeat/filebeat-daemonset.yaml index b6df8f31fdbd..c027abede2af 100644 --- a/deploy/kubernetes/filebeat/filebeat-daemonset.yaml +++ b/deploy/kubernetes/filebeat/filebeat-daemonset.yaml @@ -20,7 +20,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: filebeat - image: docker.elastic.co/beats/filebeat:%VERSION% + image: docker.elastic.co/beats/filebeat%IMAGE_MODIFIER%:%VERSION% args: [ "-c", "/etc/filebeat.yml", "-e", diff --git a/deploy/kubernetes/heartbeat-kubernetes.yaml b/deploy/kubernetes/heartbeat-kubernetes.yaml index 90c5ca7a3cc9..280c243d305b 100644 --- a/deploy/kubernetes/heartbeat-kubernetes.yaml +++ b/deploy/kubernetes/heartbeat-kubernetes.yaml @@ -171,7 +171,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: heartbeat - image: docker.elastic.co/beats/heartbeat:9.0.0 + image: docker.elastic.co/beats/heartbeat-wolfi:9.0.0 args: [ "-c", "/etc/heartbeat.yml", "-e", diff --git a/deploy/kubernetes/heartbeat/heartbeat-deployment.yaml b/deploy/kubernetes/heartbeat/heartbeat-deployment.yaml index 3f1a73d3324a..ec95e50ee53d 100644 --- a/deploy/kubernetes/heartbeat/heartbeat-deployment.yaml +++ b/deploy/kubernetes/heartbeat/heartbeat-deployment.yaml @@ -20,7 +20,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: heartbeat - image: docker.elastic.co/beats/heartbeat:%VERSION% + image: docker.elastic.co/beats/heartbeat%IMAGE_MODIFIER%:%VERSION% args: [ "-c", "/etc/heartbeat.yml", "-e", diff --git a/deploy/kubernetes/metricbeat-kubernetes.yaml b/deploy/kubernetes/metricbeat-kubernetes.yaml index 8fb3e5e087d4..418c902bffc0 100644 --- a/deploy/kubernetes/metricbeat-kubernetes.yaml +++ b/deploy/kubernetes/metricbeat-kubernetes.yaml @@ -291,7 +291,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: metricbeat - image: docker.elastic.co/beats/metricbeat:9.0.0 + image: 
docker.elastic.co/beats/metricbeat-wolfi:9.0.0 args: [ "-c", "/etc/metricbeat.yml", "-e", diff --git a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml index c4004d91e288..e8c0074be6de 100644 --- a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml +++ b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml @@ -21,7 +21,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: metricbeat - image: docker.elastic.co/beats/metricbeat:%VERSION% + image: docker.elastic.co/beats/metricbeat%IMAGE_MODIFIER%:%VERSION% args: [ "-c", "/etc/metricbeat.yml", "-e", From 42f2d41694106acbdc3d85e4a0f612a3a5a36795 Mon Sep 17 00:00:00 2001 From: Kavindu Dodanduwa Date: Tue, 15 Oct 2024 07:22:20 -0700 Subject: [PATCH 37/90] [Filebeat] [AWS] Add support to source AWS cloudwatch logs from linked accounts (#41188) * use LogGroupIdentifier fiter instead of LogGroupName and related parameter, field renaming Signed-off-by: Kavindu Dodanduwa * configuration parsing to support arn & linked accounts Signed-off-by: Kavindu Dodanduwa * document the ARN usage Signed-off-by: Kavindu Dodanduwa * add changelog entry Signed-off-by: Kavindu Dodanduwa * code review changes Signed-off-by: Kavindu Dodanduwa * code review change - fix typo Signed-off-by: Kavindu Dodanduwa --------- Signed-off-by: Kavindu Dodanduwa Co-authored-by: kaiyan-sheng --- CHANGELOG.next.asciidoc | 1 + .../filebeat.inputs.reference.xpack.yml.tmpl | 2 + .../docs/inputs/input-aws-cloudwatch.asciidoc | 18 +++- x-pack/filebeat/filebeat.reference.yml | 2 + .../input/awscloudwatch/cloudwatch.go | 36 +++---- .../input/awscloudwatch/cloudwatch_test.go | 100 +++++++++--------- x-pack/filebeat/input/awscloudwatch/input.go | 86 +++++++++------ .../input/awscloudwatch/input_test.go | 98 ++++++++++++++++- .../filebeat/input/awscloudwatch/processor.go | 10 +- 9 files changed, 237 insertions(+), 116 deletions(-) diff --git a/CHANGELOG.next.asciidoc 
b/CHANGELOG.next.asciidoc index ebd20cb190cb..2ba3d43c93f6 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -325,6 +325,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Improved GCS input documentation. {pull}41143[41143] - Add CSV decoding capacity to azureblobstorage input {pull}40978[40978] - Add CSV decoding capacity to gcs input {pull}40979[40979] +- Add support to source AWS cloudwatch logs from linked accounts. {pull}41188[41188] - Jounrald input now supports filtering by facilities {pull}41061[41061] - System module now supports reading from jounrald. {pull}41061[41061] diff --git a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl index 8215bc3c3893..3f131b6dc494 100644 --- a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl +++ b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl @@ -135,6 +135,8 @@ #credential_profile_name: test-aws-s3-input # ARN of the log group to collect logs from + # This ARN could refer to a log group from a linked source account + # Note: This property precedes over `log_group_name` & `log_group_name_prefix` #log_group_arn: "arn:aws:logs:us-east-1:428152502467:log-group:test:*" # Name of the log group to collect logs from. diff --git a/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc index c2b898da3587..733f0bac41f5 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc @@ -40,16 +40,26 @@ The `aws-cloudwatch` input supports the following configuration options plus the [float] ==== `log_group_arn` ARN of the log group to collect logs from. +The ARN may refer to a log group in a linked source account. 
+ +Note: `log_group_arn` cannot be combined with `log_group_name`, `log_group_name_prefix` and `region_name` properties. +If set, values extracted from `log_group_arn` takes precedence over them. + +Note: If the log group is in a linked source account and filebeat is configured to use a monitoring account, you must use the `log_group_arn`. +You can read more about AWS account linking and cross account observability from the https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html[official documentation]. [float] ==== `log_group_name` -Name of the log group to collect logs from. Note: `region_name` is required when -log_group_name is given. +Name of the log group to collect logs from. + +Note: `region_name` is required when log_group_name is given. [float] ==== `log_group_name_prefix` -The prefix for a group of log group names. Note: `region_name` is required when -log_group_name_prefix is given. `log_group_name` and `log_group_name_prefix` +The prefix for a group of log group names. + +Note: `region_name` is required when +`log_group_name_prefix` is given. `log_group_name` and `log_group_name_prefix` cannot be given at the same time. The number of workers that will process the log groups under this prefix is set through the `number_of_workers` config. diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 749f0e0c291f..a2f1daeebb4f 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -3069,6 +3069,8 @@ filebeat.inputs: #credential_profile_name: test-aws-s3-input # ARN of the log group to collect logs from + # This ARN could refer to a log group from a linked source account + # Note: This property precedes over `log_group_name` & `log_group_name_prefix` #log_group_arn: "arn:aws:logs:us-east-1:428152502467:log-group:test:*" # Name of the log group to collect logs from. 
diff --git a/x-pack/filebeat/input/awscloudwatch/cloudwatch.go b/x-pack/filebeat/input/awscloudwatch/cloudwatch.go index ffc5b2e3cd80..4d089268e356 100644 --- a/x-pack/filebeat/input/awscloudwatch/cloudwatch.go +++ b/x-pack/filebeat/input/awscloudwatch/cloudwatch.go @@ -37,7 +37,7 @@ type cloudwatchPoller struct { } type workResponse struct { - logGroup string + logGroupId string startTime, endTime time.Time } @@ -64,8 +64,8 @@ func newCloudwatchPoller(log *logp.Logger, metrics *inputMetrics, } } -func (p *cloudwatchPoller) run(svc *cloudwatchlogs.Client, logGroup string, startTime, endTime time.Time, logProcessor *logProcessor) { - err := p.getLogEventsFromCloudWatch(svc, logGroup, startTime, endTime, logProcessor) +func (p *cloudwatchPoller) run(svc *cloudwatchlogs.Client, logGroupId string, startTime, endTime time.Time, logProcessor *logProcessor) { + err := p.getLogEventsFromCloudWatch(svc, logGroupId, startTime, endTime, logProcessor) if err != nil { var errRequestCanceled *awssdk.RequestCanceledError if errors.As(err, &errRequestCanceled) { @@ -76,9 +76,9 @@ func (p *cloudwatchPoller) run(svc *cloudwatchlogs.Client, logGroup string, star } // getLogEventsFromCloudWatch uses FilterLogEvents API to collect logs from CloudWatch -func (p *cloudwatchPoller) getLogEventsFromCloudWatch(svc *cloudwatchlogs.Client, logGroup string, startTime, endTime time.Time, logProcessor *logProcessor) error { +func (p *cloudwatchPoller) getLogEventsFromCloudWatch(svc *cloudwatchlogs.Client, logGroupId string, startTime, endTime time.Time, logProcessor *logProcessor) error { // construct FilterLogEventsInput - filterLogEventsInput := p.constructFilterLogEventsInput(startTime, endTime, logGroup) + filterLogEventsInput := p.constructFilterLogEventsInput(startTime, endTime, logGroupId) paginator := cloudwatchlogs.NewFilterLogEventsPaginator(svc, filterLogEventsInput) for paginator.HasMorePages() { filterLogEventsOutput, err := paginator.NextPage(context.TODO()) @@ -96,16 +96,16 @@ 
func (p *cloudwatchPoller) getLogEventsFromCloudWatch(svc *cloudwatchlogs.Client p.log.Debug("done sleeping") p.log.Debugf("Processing #%v events", len(logEvents)) - logProcessor.processLogEvents(logEvents, logGroup, p.region) + logProcessor.processLogEvents(logEvents, logGroupId, p.region) } return nil } -func (p *cloudwatchPoller) constructFilterLogEventsInput(startTime, endTime time.Time, logGroup string) *cloudwatchlogs.FilterLogEventsInput { +func (p *cloudwatchPoller) constructFilterLogEventsInput(startTime, endTime time.Time, logGroupId string) *cloudwatchlogs.FilterLogEventsInput { filterLogEventsInput := &cloudwatchlogs.FilterLogEventsInput{ - LogGroupName: awssdk.String(logGroup), - StartTime: awssdk.Int64(unixMsFromTime(startTime)), - EndTime: awssdk.Int64(unixMsFromTime(endTime)), + LogGroupIdentifier: awssdk.String(logGroupId), + StartTime: awssdk.Int64(unixMsFromTime(startTime)), + EndTime: awssdk.Int64(unixMsFromTime(endTime)), } if len(p.config.LogStreams) > 0 { @@ -138,9 +138,9 @@ func (p *cloudwatchPoller) startWorkers( work = <-p.workResponseChan } - p.log.Infof("aws-cloudwatch input worker for log group: '%v' has started", work.logGroup) - p.run(svc, work.logGroup, work.startTime, work.endTime, logProcessor) - p.log.Infof("aws-cloudwatch input worker for log group '%v' has stopped.", work.logGroup) + p.log.Infof("aws-cloudwatch input worker for log group: '%v' has started", work.logGroupId) + p.run(svc, work.logGroupId, work.startTime, work.endTime, logProcessor) + p.log.Infof("aws-cloudwatch input worker for log group '%v' has stopped.", work.logGroupId) } }() } @@ -149,7 +149,7 @@ func (p *cloudwatchPoller) startWorkers( // receive implements the main run loop that distributes tasks to the worker // goroutines. It accepts a "clock" callback (which on a live input should // equal time.Now) to allow deterministic unit tests. 
-func (p *cloudwatchPoller) receive(ctx context.Context, logGroupNames []string, clock func() time.Time) { +func (p *cloudwatchPoller) receive(ctx context.Context, logGroupIDs []string, clock func() time.Time) { defer p.workerWg.Wait() // startTime and endTime are the bounds of the current scanning interval. // If we're starting at the end of the logs, advance the start time to the @@ -160,15 +160,15 @@ func (p *cloudwatchPoller) receive(ctx context.Context, logGroupNames []string, startTime = endTime.Add(-p.config.ScanFrequency) } for ctx.Err() == nil { - for _, lg := range logGroupNames { + for _, lg := range logGroupIDs { select { case <-ctx.Done(): return case <-p.workRequestChan: p.workResponseChan <- workResponse{ - logGroup: lg, - startTime: startTime, - endTime: endTime, + logGroupId: lg, + startTime: startTime, + endTime: endTime, } } } diff --git a/x-pack/filebeat/input/awscloudwatch/cloudwatch_test.go b/x-pack/filebeat/input/awscloudwatch/cloudwatch_test.go index f666db859824..0c266c8291f1 100644 --- a/x-pack/filebeat/input/awscloudwatch/cloudwatch_test.go +++ b/x-pack/filebeat/input/awscloudwatch/cloudwatch_test.go @@ -31,7 +31,7 @@ type receiveTestStep struct { type receiveTestCase struct { name string - logGroups []string + logGroupIDs []string configOverrides func(*config) startTime time.Time steps []receiveTestStep @@ -46,37 +46,37 @@ func TestReceive(t *testing.T) { t3 := t2.Add(time.Hour) testCases := []receiveTestCase{ { - name: "Default config with one log group", - logGroups: []string{"a"}, - startTime: t1, + name: "Default config with one log group", + logGroupIDs: []string{"a"}, + startTime: t1, steps: []receiveTestStep{ { expected: []workResponse{ - {logGroup: "a", startTime: t0, endTime: t1}, + {logGroupId: "a", startTime: t0, endTime: t1}, }, nextTime: t2, }, { expected: []workResponse{ - {logGroup: "a", startTime: t1, endTime: t2}, + {logGroupId: "a", startTime: t1, endTime: t2}, }, nextTime: t3, }, { expected: []workResponse{ - 
{logGroup: "a", startTime: t2, endTime: t3}, + {logGroupId: "a", startTime: t2, endTime: t3}, }, }, }, }, { - name: "Default config with two log groups", - logGroups: []string{"a", "b"}, - startTime: t1, + name: "Default config with two log groups", + logGroupIDs: []string{"a", "b"}, + startTime: t1, steps: []receiveTestStep{ { expected: []workResponse{ - {logGroup: "a", startTime: t0, endTime: t1}, + {logGroupId: "a", startTime: t0, endTime: t1}, }, nextTime: t2, }, @@ -84,49 +84,49 @@ func TestReceive(t *testing.T) { expected: []workResponse{ // start/end times for the second log group should be the same // even though the clock has changed. - {logGroup: "b", startTime: t0, endTime: t1}, + {logGroupId: "b", startTime: t0, endTime: t1}, }, }, { expected: []workResponse{ - {logGroup: "a", startTime: t1, endTime: t2}, - {logGroup: "b", startTime: t1, endTime: t2}, + {logGroupId: "a", startTime: t1, endTime: t2}, + {logGroupId: "b", startTime: t1, endTime: t2}, }, nextTime: t3, }, { expected: []workResponse{ - {logGroup: "a", startTime: t2, endTime: t3}, - {logGroup: "b", startTime: t2, endTime: t3}, + {logGroupId: "a", startTime: t2, endTime: t3}, + {logGroupId: "b", startTime: t2, endTime: t3}, }, }, }, }, { - name: "One log group with start_position: end", - logGroups: []string{"a"}, - startTime: t1, + name: "One log group with start_position: end", + logGroupIDs: []string{"a"}, + startTime: t1, configOverrides: func(c *config) { c.StartPosition = "end" }, steps: []receiveTestStep{ { expected: []workResponse{ - {logGroup: "a", startTime: t1.Add(-defaultScanFrequency), endTime: t1}, + {logGroupId: "a", startTime: t1.Add(-defaultScanFrequency), endTime: t1}, }, nextTime: t2, }, { expected: []workResponse{ - {logGroup: "a", startTime: t1, endTime: t2}, + {logGroupId: "a", startTime: t1, endTime: t2}, }, }, }, }, { - name: "Two log group with start_position: end and latency", - logGroups: []string{"a", "b"}, - startTime: t1, + name: "Two log group with start_position: 
end and latency", + logGroupIDs: []string{"a", "b"}, + startTime: t1, configOverrides: func(c *config) { c.StartPosition = "end" c.Latency = time.Second @@ -134,40 +134,40 @@ func TestReceive(t *testing.T) { steps: []receiveTestStep{ { expected: []workResponse{ - {logGroup: "a", startTime: t1.Add(-defaultScanFrequency - time.Second), endTime: t1.Add(-time.Second)}, - {logGroup: "b", startTime: t1.Add(-defaultScanFrequency - time.Second), endTime: t1.Add(-time.Second)}, + {logGroupId: "a", startTime: t1.Add(-defaultScanFrequency - time.Second), endTime: t1.Add(-time.Second)}, + {logGroupId: "b", startTime: t1.Add(-defaultScanFrequency - time.Second), endTime: t1.Add(-time.Second)}, }, nextTime: t2, }, { expected: []workResponse{ - {logGroup: "a", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, - {logGroup: "b", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, + {logGroupId: "a", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, + {logGroupId: "b", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, }, }, }, }, { - name: "Three log groups with latency", - logGroups: []string{"a", "b", "c"}, - startTime: t1, + name: "Three log groups with latency", + logGroupIDs: []string{"a", "b", "c"}, + startTime: t1, configOverrides: func(c *config) { c.Latency = time.Second }, steps: []receiveTestStep{ { expected: []workResponse{ - {logGroup: "a", startTime: t0, endTime: t1.Add(-time.Second)}, - {logGroup: "b", startTime: t0, endTime: t1.Add(-time.Second)}, - {logGroup: "c", startTime: t0, endTime: t1.Add(-time.Second)}, + {logGroupId: "a", startTime: t0, endTime: t1.Add(-time.Second)}, + {logGroupId: "b", startTime: t0, endTime: t1.Add(-time.Second)}, + {logGroupId: "c", startTime: t0, endTime: t1.Add(-time.Second)}, }, nextTime: t2, }, { expected: []workResponse{ - {logGroup: "a", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, - {logGroup: "b", startTime: t1.Add(-time.Second), endTime: 
t2.Add(-time.Second)}, - {logGroup: "c", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, + {logGroupId: "a", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, + {logGroupId: "b", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, + {logGroupId: "c", startTime: t1.Add(-time.Second), endTime: t2.Add(-time.Second)}, }, }, }, @@ -191,7 +191,7 @@ func TestReceive(t *testing.T) { test.configOverrides(&p.config) } clock.time = test.startTime - go p.receive(ctx, test.logGroups, clock.now) + go p.receive(ctx, test.logGroupIDs, clock.now) for _, step := range test.steps { for i, expected := range step.expected { p.workRequestChan <- struct{}{} @@ -209,34 +209,36 @@ func TestReceive(t *testing.T) { } type filterLogEventsTestCase struct { - name string - logGroup string - startTime time.Time - endTime time.Time - expected *cloudwatchlogs.FilterLogEventsInput + name string + logGroupId string + startTime time.Time + endTime time.Time + expected *cloudwatchlogs.FilterLogEventsInput } func TestFilterLogEventsInput(t *testing.T) { now, _ := time.Parse(time.RFC3339, "2024-07-12T13:00:00+00:00") + id := "myLogGroup" + testCases := []filterLogEventsTestCase{ { - name: "StartPosition: beginning, first iteration", - logGroup: "a", + name: "StartPosition: beginning, first iteration", + logGroupId: id, // The zero value of type time.Time{} is January 1, year 1, 00:00:00.000000000 UTC // Events with a timestamp before the time - January 1, 1970, 00:00:00 UTC are not returned by AWS API // make sure zero value of time.Time{} was converted startTime: time.Time{}, endTime: now, expected: &cloudwatchlogs.FilterLogEventsInput{ - LogGroupName: awssdk.String("a"), - StartTime: awssdk.Int64(0), - EndTime: awssdk.Int64(1720789200000), + LogGroupIdentifier: awssdk.String(id), + StartTime: awssdk.Int64(0), + EndTime: awssdk.Int64(1720789200000), }, }, } for _, test := range testCases { p := cloudwatchPoller{} - result := 
p.constructFilterLogEventsInput(test.startTime, test.endTime, test.logGroup) + result := p.constructFilterLogEventsInput(test.startTime, test.endTime, test.logGroupId) assert.Equal(t, test.expected, result) } diff --git a/x-pack/filebeat/input/awscloudwatch/input.go b/x-pack/filebeat/input/awscloudwatch/input.go index d10ae348d941..f66e403a1a90 100644 --- a/x-pack/filebeat/input/awscloudwatch/input.go +++ b/x-pack/filebeat/input/awscloudwatch/input.go @@ -62,25 +62,13 @@ type cloudwatchInput struct { func newInput(config config) (*cloudwatchInput, error) { cfgwarn.Beta("aws-cloudwatch input type is used") + + // perform AWS configuration validation awsConfig, err := awscommon.InitializeAWSConfig(config.AWSConfig) if err != nil { return nil, fmt.Errorf("failed to initialize AWS credentials: %w", err) } - if config.LogGroupARN != "" { - logGroupName, regionName, err := parseARN(config.LogGroupARN) - if err != nil { - return nil, fmt.Errorf("parse log group ARN failed: %w", err) - } - - config.LogGroupName = logGroupName - config.RegionName = regionName - } - - if config.RegionName != "" { - awsConfig.Region = config.RegionName - } - return &cloudwatchInput{ config: config, awsConfig: awsConfig, @@ -103,15 +91,25 @@ func (in *cloudwatchInput) Run(inputContext v2.Context, pipeline beat.Pipeline) } defer client.Close() + var logGroupIDs []string + logGroupIDs, region, err := fromConfig(in.config, in.awsConfig) + if err != nil { + return fmt.Errorf("error processing configurations: %w", err) + } + + in.awsConfig.Region = region svc := cloudwatchlogs.NewFromConfig(in.awsConfig, func(o *cloudwatchlogs.Options) { if in.config.AWSConfig.FIPSEnabled { o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled } }) - logGroupNames, err := getLogGroupNames(svc, in.config.LogGroupNamePrefix, in.config.LogGroupName) - if err != nil { - return fmt.Errorf("failed to get log group names: %w", err) + if len(logGroupIDs) == 0 { + // fallback to LogGroupNamePrefix to derive 
group IDs + logGroupIDs, err = getLogGroupNames(svc, in.config.LogGroupNamePrefix) + if err != nil { + return fmt.Errorf("failed to get log group names from LogGroupNamePrefix: %w", err) + } } log := inputContext.Logger @@ -120,36 +118,54 @@ func (in *cloudwatchInput) Run(inputContext v2.Context, pipeline beat.Pipeline) cwPoller := newCloudwatchPoller( log.Named("cloudwatch_poller"), in.metrics, - in.awsConfig.Region, + region, in.config) logProcessor := newLogProcessor(log.Named("log_processor"), in.metrics, client, ctx) - cwPoller.metrics.logGroupsTotal.Add(uint64(len(logGroupNames))) + cwPoller.metrics.logGroupsTotal.Add(uint64(len(logGroupIDs))) cwPoller.startWorkers(ctx, svc, logProcessor) - cwPoller.receive(ctx, logGroupNames, time.Now) + cwPoller.receive(ctx, logGroupIDs, time.Now) return nil } -func parseARN(logGroupARN string) (string, string, error) { - arnParsed, err := arn.Parse(logGroupARN) - if err != nil { - return "", "", fmt.Errorf("error Parse arn %s: %w", logGroupARN, err) - } +// fromConfig is a helper to parse input configurations and derive logGroupIDs & aws region +// Returned logGroupIDs could be empty, which require other fallback mechanisms to derive them. +// See getLogGroupNames for example. 
+func fromConfig(cfg config, awsCfg awssdk.Config) (logGroupIDs []string, region string, err error) { + // LogGroupARN has precedence over LogGroupName & RegionName + if cfg.LogGroupARN != "" { + parsedArn, err := arn.Parse(cfg.LogGroupARN) + if err != nil { + return nil, "", fmt.Errorf("failed to parse log group ARN: %w", err) + } - if strings.Contains(arnParsed.Resource, ":") { - resourceARNSplit := strings.Split(arnParsed.Resource, ":") - if len(resourceARNSplit) >= 2 && resourceARNSplit[0] == "log-group" { - return resourceARNSplit[1], arnParsed.Region, nil + if parsedArn.Region == "" { + return nil, "", fmt.Errorf("failed to parse log group ARN: missing region") } + + // refine to match AWS API parameter regex of logGroupIdentifier + groupId := strings.TrimSuffix(cfg.LogGroupARN, ":*") + logGroupIDs = append(logGroupIDs, groupId) + + return logGroupIDs, parsedArn.Region, nil } - return "", "", fmt.Errorf("cannot get log group name from log group ARN: %s", logGroupARN) -} -// getLogGroupNames uses DescribeLogGroups API to retrieve all log group names -func getLogGroupNames(svc *cloudwatchlogs.Client, logGroupNamePrefix string, logGroupName string) ([]string, error) { - if logGroupNamePrefix == "" { - return []string{logGroupName}, nil + // then fallback to LogrGroupName + if cfg.LogGroupName != "" { + logGroupIDs = append(logGroupIDs, cfg.LogGroupName) + } + + // finally derive region + if cfg.RegionName != "" { + region = cfg.RegionName + } else { + region = awsCfg.Region } + return logGroupIDs, region, nil +} + +// getLogGroupNames uses DescribeLogGroups API to retrieve all log group names +func getLogGroupNames(svc *cloudwatchlogs.Client, logGroupNamePrefix string) ([]string, error) { // construct DescribeLogGroupsInput describeLogGroupsInput := &cloudwatchlogs.DescribeLogGroupsInput{ LogGroupNamePrefix: awssdk.String(logGroupNamePrefix), diff --git a/x-pack/filebeat/input/awscloudwatch/input_test.go b/x-pack/filebeat/input/awscloudwatch/input_test.go index 
25ecc18ea57c..4d8c6e84e2b5 100644 --- a/x-pack/filebeat/input/awscloudwatch/input_test.go +++ b/x-pack/filebeat/input/awscloudwatch/input_test.go @@ -50,9 +50,97 @@ func TestCreateEvent(t *testing.T) { assert.Equal(t, expectedEventFields, event.Fields) } -func TestParseARN(t *testing.T) { - logGroup, regionName, err := parseARN("arn:aws:logs:us-east-1:428152502467:log-group:test:*") - assert.Equal(t, "test", logGroup) - assert.Equal(t, "us-east-1", regionName) - assert.NoError(t, err) +func Test_FromConfig(t *testing.T) { + tests := []struct { + name string + cfg config + awsCfg awssdk.Config + expectGroups []string + expectRegion string + isError bool + }{ + { + name: "Valid log group ARN", + cfg: config{ + LogGroupARN: "arn:aws:logs:us-east-1:123456789012:myLogs", + }, + awsCfg: awssdk.Config{ + Region: "us-east-1", + }, + expectGroups: []string{"arn:aws:logs:us-east-1:123456789012:myLogs"}, + expectRegion: "us-east-1", + isError: false, + }, + { + name: "Invalid ARN results in an error", + cfg: config{ + LogGroupARN: "invalidARN", + }, + awsCfg: awssdk.Config{ + Region: "us-east-1", + }, + expectRegion: "", + isError: true, + }, + { + name: "Valid log group ARN but empty region cause error", + cfg: config{ + LogGroupARN: "arn:aws:logs::123456789012:otherLogs", + }, + awsCfg: awssdk.Config{ + Region: "us-east-1", + }, + expectRegion: "", + isError: true, + }, + { + name: "ARN suffix trimming to match logGroupIdentifier requirement", + cfg: config{ + LogGroupARN: "arn:aws:logs:us-east-1:123456789012:log-group:/aws/kinesisfirehose/ProjectA:*", + }, + awsCfg: awssdk.Config{ + Region: "us-east-1", + }, + expectGroups: []string{"arn:aws:logs:us-east-1:123456789012:log-group:/aws/kinesisfirehose/ProjectA"}, + expectRegion: "us-east-1", + isError: false, + }, + { + name: "LogGroupName only", + cfg: config{ + LogGroupName: "myLogGroup", + }, + awsCfg: awssdk.Config{ + Region: "us-east-1", + }, + expectGroups: []string{"myLogGroup"}, + expectRegion: "us-east-1", + 
isError: false, + }, + { + name: "LogGroupName and region override", + cfg: config{ + LogGroupName: "myLogGroup", + RegionName: "sa-east-1", + }, + awsCfg: awssdk.Config{ + Region: "us-east-1", + }, + expectGroups: []string{"myLogGroup"}, + expectRegion: "sa-east-1", + isError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + groups, region, err := fromConfig(tt.cfg, tt.awsCfg) + if tt.isError { + assert.Error(t, err) + } + + assert.Equal(t, tt.expectGroups, groups) + assert.Equal(t, tt.expectRegion, region) + }) + } } diff --git a/x-pack/filebeat/input/awscloudwatch/processor.go b/x-pack/filebeat/input/awscloudwatch/processor.go index 818ba85d57ec..c0be36921633 100644 --- a/x-pack/filebeat/input/awscloudwatch/processor.go +++ b/x-pack/filebeat/input/awscloudwatch/processor.go @@ -32,22 +32,22 @@ func newLogProcessor(log *logp.Logger, metrics *inputMetrics, publisher beat.Cli } } -func (p *logProcessor) processLogEvents(logEvents []types.FilteredLogEvent, logGroup string, regionName string) { +func (p *logProcessor) processLogEvents(logEvents []types.FilteredLogEvent, logGroupId string, regionName string) { for _, logEvent := range logEvents { - event := createEvent(logEvent, logGroup, regionName) + event := createEvent(logEvent, logGroupId, regionName) p.metrics.cloudwatchEventsCreatedTotal.Inc() p.publisher.Publish(event) } } -func createEvent(logEvent types.FilteredLogEvent, logGroup string, regionName string) beat.Event { +func createEvent(logEvent types.FilteredLogEvent, logGroupId string, regionName string) beat.Event { event := beat.Event{ Timestamp: time.Unix(*logEvent.Timestamp/1000, 0).UTC(), Fields: mapstr.M{ "message": *logEvent.Message, "log": mapstr.M{ "file": mapstr.M{ - "path": logGroup + "/" + *logEvent.LogStreamName, + "path": logGroupId + "/" + *logEvent.LogStreamName, }, }, "event": mapstr.M{ @@ -55,7 +55,7 @@ func createEvent(logEvent types.FilteredLogEvent, logGroup string, regionName st "ingested": 
time.Now(), }, "aws.cloudwatch": mapstr.M{ - "log_group": logGroup, + "log_group": logGroupId, "log_stream": *logEvent.LogStreamName, "ingestion_time": time.Unix(*logEvent.IngestionTime/1000, 0), }, From 7e1b5280675811212413fa0812d0f9da50ee607b Mon Sep 17 00:00:00 2001 From: Kavindu Dodanduwa Date: Tue, 15 Oct 2024 13:09:32 -0700 Subject: [PATCH 38/90] [Filebeat] [AWS] add support to source logs from AWS linked source accounts when using log_group_name_prefix (#41206) * configuration parsing to support arn & linked accounts Signed-off-by: Kavindu Dodanduwa # Conflicts: # x-pack/filebeat/input/awscloudwatch/input.go * code review change - fix typo Signed-off-by: Kavindu Dodanduwa * add support to linked accounts when using prefix mode Signed-off-by: Kavindu Dodanduwa * add changelog entry Signed-off-by: Kavindu Dodanduwa * review suggestion Signed-off-by: Kavindu Dodanduwa * use non-pointer struct property Signed-off-by: Kavindu Dodanduwa --------- Signed-off-by: Kavindu Dodanduwa --- CHANGELOG.next.asciidoc | 1 + .../filebeat.inputs.reference.xpack.yml.tmpl | 5 ++++ .../docs/inputs/input-aws-cloudwatch.asciidoc | 10 ++++++- x-pack/filebeat/filebeat.reference.yml | 5 ++++ x-pack/filebeat/input/awscloudwatch/config.go | 29 ++++++++++--------- x-pack/filebeat/input/awscloudwatch/input.go | 18 +++++++----- 6 files changed, 45 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2ba3d43c93f6..76062b5a19d7 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -328,6 +328,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add support to source AWS cloudwatch logs from linked accounts. {pull}41188[41188] - Jounrald input now supports filtering by facilities {pull}41061[41061] - System module now supports reading from jounrald. {pull}41061[41061] +- Add support to include AWS cloudwatch linked accounts when using log_group_name_prefix to define log group names. 
{pull}41206[41206] *Auditbeat* diff --git a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl index 3f131b6dc494..4a2065ddf6af 100644 --- a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl +++ b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl @@ -144,10 +144,15 @@ #log_group_name: test # The prefix for a group of log group names. + # You can include linked source accounts by using the property `include_linked_accounts_for_prefix_mode`. # Note: `region_name` is required when `log_group_name_prefix` is given. # `log_group_name` and `log_group_name_prefix` cannot be given at the same time. #log_group_name_prefix: /aws/ + # State whether to include linked source accounts when obtaining log groups matching the prefix provided through `log_group_name_prefix` + # This property works together with `log_group_name_prefix` and default value (if unset) is false + #include_linked_accounts_for_prefix_mode: true + # Region that the specified log group or log group prefix belongs to. #region_name: us-east-1 diff --git a/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc index 733f0bac41f5..d986e9e6b20b 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc @@ -56,13 +56,21 @@ Note: `region_name` is required when log_group_name is given. [float] ==== `log_group_name_prefix` -The prefix for a group of log group names. +The prefix for a group of log group names. See `include_linked_accounts_for_prefix_mode` option for linked source accounts behavior. Note: `region_name` is required when `log_group_name_prefix` is given. `log_group_name` and `log_group_name_prefix` cannot be given at the same time. 
The number of workers that will process the log groups under this prefix is set through the `number_of_workers` config. +[float] +==== `include_linked_accounts_for_prefix_mode` +Configure whether to include linked source accounts that contains the prefix value defined through `log_group_name_prefix`. +Accepts a boolean and this is by default disabled. + +Note: Utilize `log_group_arn` if you desire to obtain logs from a known log group (including linked source accounts) +You can read more about AWS account linking and cross account observability from the https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html[official documentation]. + [float] ==== `region_name` Region that the specified log group or log group prefix belongs to. diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index a2f1daeebb4f..4dedabc28e34 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -3078,10 +3078,15 @@ filebeat.inputs: #log_group_name: test # The prefix for a group of log group names. + # You can include linked source accounts by using the property `include_linked_accounts_for_prefix_mode`. # Note: `region_name` is required when `log_group_name_prefix` is given. # `log_group_name` and `log_group_name_prefix` cannot be given at the same time. #log_group_name_prefix: /aws/ + # State whether to include linked source accounts when obtaining log groups matching the prefix provided through `log_group_name_prefix` + # This property works together with `log_group_name_prefix` and default value (if unset) is false + #include_linked_accounts_for_prefix_mode: true + # Region that the specified log group or log group prefix belongs to. 
#region_name: us-east-1 diff --git a/x-pack/filebeat/input/awscloudwatch/config.go b/x-pack/filebeat/input/awscloudwatch/config.go index 438aceeb19e6..5e826aa09fd7 100644 --- a/x-pack/filebeat/input/awscloudwatch/config.go +++ b/x-pack/filebeat/input/awscloudwatch/config.go @@ -13,20 +13,21 @@ import ( ) type config struct { - harvester.ForwarderConfig `config:",inline"` - LogGroupARN string `config:"log_group_arn"` - LogGroupName string `config:"log_group_name"` - LogGroupNamePrefix string `config:"log_group_name_prefix"` - RegionName string `config:"region_name"` - LogStreams []*string `config:"log_streams"` - LogStreamPrefix string `config:"log_stream_prefix"` - StartPosition string `config:"start_position" default:"beginning"` - ScanFrequency time.Duration `config:"scan_frequency" validate:"min=0,nonzero"` - APITimeout time.Duration `config:"api_timeout" validate:"min=0,nonzero"` - APISleep time.Duration `config:"api_sleep" validate:"min=0,nonzero"` - Latency time.Duration `config:"latency"` - NumberOfWorkers int `config:"number_of_workers"` - AWSConfig awscommon.ConfigAWS `config:",inline"` + harvester.ForwarderConfig `config:",inline"` + LogGroupARN string `config:"log_group_arn"` + LogGroupName string `config:"log_group_name"` + LogGroupNamePrefix string `config:"log_group_name_prefix"` + IncludeLinkedAccountsForPrefixMode bool `config:"include_linked_accounts_for_prefix_mode"` + RegionName string `config:"region_name"` + LogStreams []*string `config:"log_streams"` + LogStreamPrefix string `config:"log_stream_prefix"` + StartPosition string `config:"start_position" default:"beginning"` + ScanFrequency time.Duration `config:"scan_frequency" validate:"min=0,nonzero"` + APITimeout time.Duration `config:"api_timeout" validate:"min=0,nonzero"` + APISleep time.Duration `config:"api_sleep" validate:"min=0,nonzero"` + Latency time.Duration `config:"latency"` + NumberOfWorkers int `config:"number_of_workers"` + AWSConfig awscommon.ConfigAWS `config:",inline"` } func 
defaultConfig() config { diff --git a/x-pack/filebeat/input/awscloudwatch/input.go b/x-pack/filebeat/input/awscloudwatch/input.go index f66e403a1a90..27b1da04d1a7 100644 --- a/x-pack/filebeat/input/awscloudwatch/input.go +++ b/x-pack/filebeat/input/awscloudwatch/input.go @@ -105,8 +105,9 @@ func (in *cloudwatchInput) Run(inputContext v2.Context, pipeline beat.Pipeline) }) if len(logGroupIDs) == 0 { - // fallback to LogGroupNamePrefix to derive group IDs - logGroupIDs, err = getLogGroupNames(svc, in.config.LogGroupNamePrefix) + // We haven't extracted group identifiers directly from the input configurations, + // now fallback to provided LogGroupNamePrefix and use derived service client to derive logGroupIDs + logGroupIDs, err = getLogGroupNames(svc, in.config.LogGroupNamePrefix, in.config.IncludeLinkedAccountsForPrefixMode) if err != nil { return fmt.Errorf("failed to get log group names from LogGroupNamePrefix: %w", err) } @@ -164,15 +165,16 @@ func fromConfig(cfg config, awsCfg awssdk.Config) (logGroupIDs []string, region return logGroupIDs, region, nil } -// getLogGroupNames uses DescribeLogGroups API to retrieve all log group names -func getLogGroupNames(svc *cloudwatchlogs.Client, logGroupNamePrefix string) ([]string, error) { +// getLogGroupNames uses DescribeLogGroups API to retrieve LogGroupArn entries that matches the provided logGroupNamePrefix +func getLogGroupNames(svc *cloudwatchlogs.Client, logGroupNamePrefix string, withLinkedAccount bool) ([]string, error) { // construct DescribeLogGroupsInput describeLogGroupsInput := &cloudwatchlogs.DescribeLogGroupsInput{ - LogGroupNamePrefix: awssdk.String(logGroupNamePrefix), + LogGroupNamePrefix: awssdk.String(logGroupNamePrefix), + IncludeLinkedAccounts: awssdk.Bool(withLinkedAccount), } // make API request - var logGroupNames []string + var logGroupIDs []string paginator := cloudwatchlogs.NewDescribeLogGroupsPaginator(svc, describeLogGroupsInput) for paginator.HasMorePages() { page, err := 
paginator.NextPage(context.TODO()) @@ -181,8 +183,8 @@ func getLogGroupNames(svc *cloudwatchlogs.Client, logGroupNamePrefix string) ([] } for _, lg := range page.LogGroups { - logGroupNames = append(logGroupNames, *lg.LogGroupName) + logGroupIDs = append(logGroupIDs, *lg.LogGroupArn) } } - return logGroupNames, nil + return logGroupIDs, nil } From d2867fdd9fb7845922cf3b50d82c594234486d06 Mon Sep 17 00:00:00 2001 From: Fae Charlton Date: Tue, 15 Oct 2024 22:51:59 -0400 Subject: [PATCH 39/90] Add asynchronous ACK handling to S3 and SQS inputs (#40699) Modify SQS ingestion to listen for ACKs asynchronously so that input workers can keep reading new objects after a previous one has been published, instead of blocking on full upstream ingestion. This addresses the bottleneck where ingesting many small objects is slow as each one waits for a full ingestion round trip. With a default configuration, SQS queues with many small objects are now ingested up to 60x faster. --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 32 +++ go.mod | 1 + go.sum | 2 + .../filebeat.inputs.reference.xpack.yml.tmpl | 4 +- .../docs/inputs/input-aws-s3.asciidoc | 14 +- x-pack/filebeat/filebeat.reference.yml | 34 +--- x-pack/filebeat/input/awss3/acks.go | 106 ++++++++++ x-pack/filebeat/input/awss3/config.go | 59 +++--- x-pack/filebeat/input/awss3/config_test.go | 34 +--- .../input/awss3/input_benchmark_test.go | 48 ++--- .../input/awss3/input_integration_test.go | 2 +- x-pack/filebeat/input/awss3/interfaces.go | 34 +--- .../input/awss3/mock_interfaces_test.go | 189 ++---------------- x-pack/filebeat/input/awss3/s3.go | 6 +- x-pack/filebeat/input/awss3/s3_input.go | 98 +++++---- x-pack/filebeat/input/awss3/s3_objects.go | 107 ++++------ .../filebeat/input/awss3/s3_objects_test.go | 59 ++---- x-pack/filebeat/input/awss3/s3_test.go | 12 +- x-pack/filebeat/input/awss3/sqs_input.go | 95 ++++++--- x-pack/filebeat/input/awss3/sqs_s3_event.go | 152 ++++++++------ .../filebeat/input/awss3/sqs_s3_event_test.go 
| 99 ++++----- x-pack/filebeat/input/awss3/sqs_test.go | 42 +++- x-pack/filebeat/module/aws/_meta/config.yml | 30 +-- .../module/aws/cloudtrail/config/aws-s3.yml | 4 - .../module/aws/cloudtrail/manifest.yml | 1 - .../module/aws/s3access/config/aws-s3.yml | 4 - .../filebeat/module/aws/s3access/manifest.yml | 1 - .../module/aws/vpcflow/config/input.yml | 4 - .../filebeat/module/aws/vpcflow/manifest.yml | 1 - x-pack/filebeat/modules.d/aws.yml.disabled | 30 +-- 31 files changed, 593 insertions(+), 712 deletions(-) create mode 100644 x-pack/filebeat/input/awss3/acks.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 76062b5a19d7..72ff8083fea3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -46,6 +46,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Added `container.image.name` to `journald` Filebeat input's Docker-specific translated fields. {pull}40450[40450] - Change log.file.path field in awscloudwatch input to nested object. {pull}41099[41099] - Remove deprecated awscloudwatch field from Filebeat. {pull}41089[41089] +- The performance of ingesting SQS data with the S3 input has improved by up to 60x for queues with many small events. `max_number_of_messages` config for SQS mode is now ignored, as the new design no longer needs a manual cap on messages. Instead, use `number_of_workers` to scale ingestion rate in both S3 and SQS modes. The increased efficiency may increase network bandwidth consumption, which can be throttled by lowering `number_of_workers`. It may also increase number of events stored in memory, which can be throttled by lowering the configured size of the internal queue. {pull}40699[40699] - System module events now contain `input.type: systemlogs` instead of `input.type: log` when harvesting log files. 
{pull}41061[41061] diff --git a/NOTICE.txt b/NOTICE.txt index bb5807f9a419..4447873499fc 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -23112,6 +23112,38 @@ Contents of probable licence file $GOMODCACHE/github.com/xdg-go/scram@v1.1.2/LIC of your accepting any such warranty or additional liability. +-------------------------------------------------------------------------------- +Dependency : github.com/zyedidia/generic +Version: v1.2.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/zyedidia/generic@v1.2.1/LICENSE: + +MIT License + +Copyright (c) 2021: Zachary Yedidia. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + -------------------------------------------------------------------------------- Dependency : go.elastic.co/apm/module/apmelasticsearch/v2 Version: v2.6.0 diff --git a/go.mod b/go.mod index c643f16b1fa4..9459bb0f13e4 100644 --- a/go.mod +++ b/go.mod @@ -214,6 +214,7 @@ require ( github.com/shirou/gopsutil/v3 v3.22.10 github.com/tklauser/go-sysconf v0.3.10 github.com/xdg-go/scram v1.1.2 + github.com/zyedidia/generic v1.2.1 go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 go.elastic.co/apm/module/apmhttp/v2 v2.6.0 go.elastic.co/apm/v2 v2.6.0 diff --git a/go.sum b/go.sum index 4f561fa3d6ec..1a57c8132766 100644 --- a/go.sum +++ b/go.sum @@ -941,6 +941,8 @@ github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc= +github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis= go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 h1:ukMcwyMaDXsS1dRK2qRYXT2AsfwaUy74TOOYCqkWJow= diff --git a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl index 4a2065ddf6af..4188035f832a 100644 --- a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl +++ b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl @@ -79,8 +79,8 @@ # SQS queue URL to receive messages from (required). #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-aws-s3-logs-queue" - # Maximum number of SQS messages that can be inflight at any time. 
- #max_number_of_messages: 5 + # Number of workers on S3 bucket or SQS queue + #number_of_workers: 5 # Maximum duration of an AWS API call (excluding S3 GetObject calls). #api_timeout: 120s diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index 43d4b102f639..aa8ecbf72595 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -307,18 +307,6 @@ The maximum number of bytes that a single log message can have. All bytes after multiline log messages, which can get large. This only applies to non-JSON logs. The default is `10 MiB`. -[float] -==== `max_number_of_messages` - -The maximum number of SQS messages that can be inflight at any time. Defaults -to 5. Setting this parameter too high can overload Elastic Agent and cause -ingest failures in situations where the SQS messages contain many S3 objects -or the S3 objects themselves contain large numbers of messages. -We recommend to keep the default value 5 and use the `Balanced` or `Optimized for -Throughput` setting in the -{fleet-guide}/es-output-settings.html#es-output-settings-performance-tuning-settings[preset] -options to tune your Elastic Agent performance. - [id="input-{type}-parsers"] [float] ==== `parsers` @@ -504,7 +492,7 @@ Prefix to apply for the list request to the S3 bucket. Default empty. [float] ==== `number_of_workers` -Number of workers that will process the S3 objects listed. (Required when `bucket_arn` is set). +Number of workers that will process the S3 or SQS objects listed. Required when `bucket_arn` is set, otherwise (in the SQS case) defaults to 5. 
[float] diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 4dedabc28e34..c00099c36670 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -139,7 +139,7 @@ filebeat.modules: # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Process CloudTrail logs @@ -188,9 +188,6 @@ filebeat.modules: # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -212,7 +209,7 @@ filebeat.modules: # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -249,9 +246,6 @@ filebeat.modules: # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -273,7 +267,7 @@ filebeat.modules: # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -310,9 +304,6 @@ filebeat.modules: # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
- #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -334,7 +325,7 @@ filebeat.modules: # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -371,9 +362,6 @@ filebeat.modules: # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -395,7 +383,7 @@ filebeat.modules: # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -432,9 +420,6 @@ filebeat.modules: # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -456,7 +441,7 @@ filebeat.modules: # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -493,9 +478,6 @@ filebeat.modules: # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
- #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -3013,8 +2995,8 @@ filebeat.inputs: # SQS queue URL to receive messages from (required). #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-aws-s3-logs-queue" - # Maximum number of SQS messages that can be inflight at any time. - #max_number_of_messages: 5 + # Number of workers on S3 bucket or SQS queue + #number_of_workers: 5 # Maximum duration of an AWS API call (excluding S3 GetObject calls). #api_timeout: 120s diff --git a/x-pack/filebeat/input/awss3/acks.go b/x-pack/filebeat/input/awss3/acks.go new file mode 100644 index 000000000000..a3850c01e87a --- /dev/null +++ b/x-pack/filebeat/input/awss3/acks.go @@ -0,0 +1,106 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package awss3 + +import ( + "github.com/zyedidia/generic/queue" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common/acker" +) + +type awsACKHandler struct { + pending *queue.Queue[pendingACK] + ackedCount int + + pendingChan chan pendingACK + ackChan chan int +} + +type pendingACK struct { + eventCount int + ackCallback func() +} + +func newAWSACKHandler() *awsACKHandler { + handler := &awsACKHandler{ + pending: queue.New[pendingACK](), + + // Channel buffer sizes are somewhat arbitrary: synchronous channels + // would be safe, but buffers slightly reduce scheduler overhead since + // the ack loop goroutine doesn't need to wake up as often. + // + // pendingChan receives one message each time an S3/SQS worker goroutine + // finishes processing an object. If it is full, workers will not be able + // to advance to the next object until the ack loop wakes up. 
+ // + // ackChan receives approximately one message every time an acknowledged + // batch of events contains at least one event from this input. (Sometimes + // fewer if messages can be coalesced.) If it is full, acknowledgement + // notifications for inputs/queue will stall until the ack loop wakes up. + // (This is a much worse consequence than pendingChan, but ackChan also + // receives fewer messages than pendingChan by a factor of ~thousands, + // so in practice it's still low-impact.) + pendingChan: make(chan pendingACK, 10), + ackChan: make(chan int, 10), + } + go handler.run() + return handler +} + +func (ah *awsACKHandler) Add(eventCount int, ackCallback func()) { + ah.pendingChan <- pendingACK{ + eventCount: eventCount, + ackCallback: ackCallback, + } +} + +// Called when a worker is closing, to indicate to the ack handler that it +// should shut down as soon as the current pending list is acknowledged. +func (ah *awsACKHandler) Close() { + close(ah.pendingChan) +} + +func (ah *awsACKHandler) pipelineEventListener() beat.EventListener { + return acker.TrackingCounter(func(_ int, total int) { + // Notify the ack handler goroutine + ah.ackChan <- total + }) +} + +// Listener that handles both incoming metadata and ACK +// confirmations. +func (ah *awsACKHandler) run() { + for { + select { + case result, ok := <-ah.pendingChan: + if ok { + ah.pending.Enqueue(result) + } else { + // Channel is closed, reset so we don't receive any more values + ah.pendingChan = nil + } + case count := <-ah.ackChan: + ah.ackedCount += count + } + + // Finalize any objects that are now completed + for !ah.pending.Empty() && ah.ackedCount >= ah.pending.Peek().eventCount { + result := ah.pending.Dequeue() + ah.ackedCount -= result.eventCount + // Run finalization asynchronously so we don't block the SQS worker + // or the queue by ignoring the ack handler's input channels. Ordering + // is no longer important at this point. 
+ if result.ackCallback != nil { + go result.ackCallback() + } + } + + // If the input is closed and all acks are completed, we're done + if ah.pending.Empty() && ah.pendingChan == nil { + return + } + } +} diff --git a/x-pack/filebeat/input/awss3/config.go b/x-pack/filebeat/input/awss3/config.go index b85c3f3871c9..d80108590ce5 100644 --- a/x-pack/filebeat/input/awss3/config.go +++ b/x-pack/filebeat/input/awss3/config.go @@ -24,37 +24,36 @@ import ( ) type config struct { - APITimeout time.Duration `config:"api_timeout"` - VisibilityTimeout time.Duration `config:"visibility_timeout"` - SQSWaitTime time.Duration `config:"sqs.wait_time"` // The max duration for which the SQS ReceiveMessage call waits for a message to arrive in the queue before returning. - SQSMaxReceiveCount int `config:"sqs.max_receive_count"` // The max number of times a message should be received (retried) before deleting it. - SQSScript *scriptConfig `config:"sqs.notification_parsing_script"` - MaxNumberOfMessages int `config:"max_number_of_messages"` - QueueURL string `config:"queue_url"` - RegionName string `config:"region"` - BucketARN string `config:"bucket_arn"` - NonAWSBucketName string `config:"non_aws_bucket_name"` - BucketListInterval time.Duration `config:"bucket_list_interval"` - BucketListPrefix string `config:"bucket_list_prefix"` - NumberOfWorkers int `config:"number_of_workers"` - AWSConfig awscommon.ConfigAWS `config:",inline"` - FileSelectors []fileSelectorConfig `config:"file_selectors"` - ReaderConfig readerConfig `config:",inline"` // Reader options to apply when no file_selectors are used. 
- PathStyle bool `config:"path_style"` - ProviderOverride string `config:"provider"` - BackupConfig backupConfig `config:",inline"` + APITimeout time.Duration `config:"api_timeout"` + VisibilityTimeout time.Duration `config:"visibility_timeout"` + SQSWaitTime time.Duration `config:"sqs.wait_time"` // The max duration for which the SQS ReceiveMessage call waits for a message to arrive in the queue before returning. + SQSMaxReceiveCount int `config:"sqs.max_receive_count"` // The max number of times a message should be received (retried) before deleting it. + SQSScript *scriptConfig `config:"sqs.notification_parsing_script"` + QueueURL string `config:"queue_url"` + RegionName string `config:"region"` + BucketARN string `config:"bucket_arn"` + NonAWSBucketName string `config:"non_aws_bucket_name"` + BucketListInterval time.Duration `config:"bucket_list_interval"` + BucketListPrefix string `config:"bucket_list_prefix"` + NumberOfWorkers int `config:"number_of_workers"` + AWSConfig awscommon.ConfigAWS `config:",inline"` + FileSelectors []fileSelectorConfig `config:"file_selectors"` + ReaderConfig readerConfig `config:",inline"` // Reader options to apply when no file_selectors are used. 
+ PathStyle bool `config:"path_style"` + ProviderOverride string `config:"provider"` + BackupConfig backupConfig `config:",inline"` } func defaultConfig() config { c := config{ - APITimeout: 120 * time.Second, - VisibilityTimeout: 300 * time.Second, - BucketListInterval: 120 * time.Second, - BucketListPrefix: "", - SQSWaitTime: 20 * time.Second, - SQSMaxReceiveCount: 5, - MaxNumberOfMessages: 5, - PathStyle: false, + APITimeout: 120 * time.Second, + VisibilityTimeout: 300 * time.Second, + BucketListInterval: 120 * time.Second, + BucketListPrefix: "", + SQSWaitTime: 20 * time.Second, + SQSMaxReceiveCount: 5, + NumberOfWorkers: 5, + PathStyle: false, } c.ReaderConfig.InitDefaults() return c @@ -93,11 +92,6 @@ func (c *config) Validate() error { "less than or equal to 20s", c.SQSWaitTime) } - if c.QueueURL != "" && c.MaxNumberOfMessages <= 0 { - return fmt.Errorf("max_number_of_messages <%v> must be greater than 0", - c.MaxNumberOfMessages) - } - if c.QueueURL != "" && c.APITimeout < c.SQSWaitTime { return fmt.Errorf("api_timeout <%v> must be greater than the sqs.wait_time <%v", c.APITimeout, c.SQSWaitTime) @@ -252,6 +246,7 @@ func (c config) getBucketARN() string { // Should be provided as a parameter to s3.NewFromConfig. 
func (c config) s3ConfigModifier(o *s3.Options) { if c.NonAWSBucketName != "" { + //nolint:staticcheck // haven't migrated to the new interface yet o.EndpointResolver = nonAWSBucketResolver{endpoint: c.AWSConfig.Endpoint} } diff --git a/x-pack/filebeat/input/awss3/config_test.go b/x-pack/filebeat/input/awss3/config_test.go index 651f8099d919..907a5854b284 100644 --- a/x-pack/filebeat/input/awss3/config_test.go +++ b/x-pack/filebeat/input/awss3/config_test.go @@ -30,17 +30,17 @@ func TestConfig(t *testing.T) { parserConf := parser.Config{} require.NoError(t, parserConf.Unpack(conf.MustNewConfigFrom(""))) return config{ - QueueURL: quequeURL, - BucketARN: s3Bucket, - NonAWSBucketName: nonAWSS3Bucket, - APITimeout: 120 * time.Second, - VisibilityTimeout: 300 * time.Second, - SQSMaxReceiveCount: 5, - SQSWaitTime: 20 * time.Second, - BucketListInterval: 120 * time.Second, - BucketListPrefix: "", - PathStyle: false, - MaxNumberOfMessages: 5, + QueueURL: quequeURL, + BucketARN: s3Bucket, + NonAWSBucketName: nonAWSS3Bucket, + APITimeout: 120 * time.Second, + VisibilityTimeout: 300 * time.Second, + SQSMaxReceiveCount: 5, + SQSWaitTime: 20 * time.Second, + BucketListInterval: 120 * time.Second, + BucketListPrefix: "", + PathStyle: false, + NumberOfWorkers: 5, ReaderConfig: readerConfig{ BufferSize: 16 * humanize.KiByte, MaxBytes: 10 * humanize.MiByte, @@ -304,18 +304,6 @@ func TestConfig(t *testing.T) { expectedErr: "number_of_workers <0> must be greater than 0", expectedCfg: nil, }, - { - name: "error on max_number_of_messages == 0", - queueURL: queueURL, - s3Bucket: "", - nonAWSS3Bucket: "", - config: mapstr.M{ - "queue_url": queueURL, - "max_number_of_messages": "0", - }, - expectedErr: "max_number_of_messages <0> must be greater than 0", - expectedCfg: nil, - }, { name: "error on buffer_size == 0 ", queueURL: queueURL, diff --git a/x-pack/filebeat/input/awss3/input_benchmark_test.go b/x-pack/filebeat/input/awss3/input_benchmark_test.go index 0d7d79b615be..54e227736025 
100644 --- a/x-pack/filebeat/input/awss3/input_benchmark_test.go +++ b/x-pack/filebeat/input/awss3/input_benchmark_test.go @@ -27,8 +27,6 @@ import ( "github.com/dustin/go-humanize" "github.com/olekukonko/tablewriter" - pubtest "github.com/elastic/beats/v7/libbeat/publisher/testing" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/monitoring" @@ -164,10 +162,17 @@ func (c constantS3) ListObjectsPaginator(string, string) s3Pager { var _ beat.Pipeline = (*fakePipeline)(nil) // fakePipeline returns new ackClients. -type fakePipeline struct{} +type fakePipeline struct { +} -func (c *fakePipeline) ConnectWith(beat.ClientConfig) (beat.Client, error) { - return &ackClient{}, nil +func newFakePipeline() *fakePipeline { + return &fakePipeline{} +} + +func (c *fakePipeline) ConnectWith(config beat.ClientConfig) (beat.Client, error) { + return &ackClient{ + eventListener: config.EventListener, + }, nil } func (c *fakePipeline) Connect() (beat.Client, error) { @@ -177,13 +182,15 @@ func (c *fakePipeline) Connect() (beat.Client, error) { var _ beat.Client = (*ackClient)(nil) // ackClient is a fake beat.Client that ACKs the published messages. -type ackClient struct{} +type ackClient struct { + eventListener beat.EventListener +} func (c *ackClient) Close() error { return nil } func (c *ackClient) Publish(event beat.Event) { - // Fake the ACK handling. 
- event.Private.(*awscommon.EventACKTracker).ACK() + c.eventListener.AddEvent(event, true) + go c.eventListener.ACKEvents(1) } func (c *ackClient) PublishAll(event []beat.Event) { @@ -208,20 +215,20 @@ file_selectors: return inputConfig } -func benchmarkInputSQS(t *testing.T, maxMessagesInflight int) testing.BenchmarkResult { +func benchmarkInputSQS(t *testing.T, workerCount int) testing.BenchmarkResult { return testing.Benchmark(func(b *testing.B) { var err error - pipeline := &fakePipeline{} config := makeBenchmarkConfig(t) - config.MaxNumberOfMessages = maxMessagesInflight + config.NumberOfWorkers = workerCount sqsReader := newSQSReaderInput(config, aws.Config{}) sqsReader.log = log.Named("sqs") - sqsReader.metrics = newInputMetrics("test_id", monitoring.NewRegistry(), maxMessagesInflight) + sqsReader.pipeline = newFakePipeline() + sqsReader.metrics = newInputMetrics("test_id", monitoring.NewRegistry(), workerCount) sqsReader.sqs, err = newConstantSQS() require.NoError(t, err) sqsReader.s3 = newConstantS3(t) - sqsReader.msgHandler, err = sqsReader.createEventProcessor(pipeline) + sqsReader.msgHandler, err = sqsReader.createEventProcessor() require.NoError(t, err, "createEventProcessor must succeed") ctx, cancel := context.WithCancel(context.Background()) @@ -240,7 +247,7 @@ func benchmarkInputSQS(t *testing.T, maxMessagesInflight int) testing.BenchmarkR b.StopTimer() elapsed := time.Since(start) - b.ReportMetric(float64(maxMessagesInflight), "max_messages_inflight") + b.ReportMetric(float64(workerCount), "number_of_workers") b.ReportMetric(elapsed.Seconds(), "sec") b.ReportMetric(float64(sqsReader.metrics.s3EventsCreatedTotal.Get()), "events") @@ -303,14 +310,7 @@ func benchmarkInputS3(t *testing.T, numberOfWorkers int) testing.BenchmarkResult metricRegistry := monitoring.NewRegistry() metrics := newInputMetrics("test_id", metricRegistry, numberOfWorkers) - - client := pubtest.NewChanClientWithCallback(100, func(event beat.Event) { - 
event.Private.(*awscommon.EventACKTracker).ACK() - }) - - defer func() { - _ = client.Close() - }() + pipeline := newFakePipeline() config := makeBenchmarkConfig(t) config.NumberOfWorkers = numberOfWorkers @@ -342,13 +342,13 @@ func benchmarkInputS3(t *testing.T, numberOfWorkers int) testing.BenchmarkResult states, err := newStates(nil, store) assert.NoError(t, err, "states creation should succeed") - s3EventHandlerFactory := newS3ObjectProcessorFactory(log.Named("s3"), metrics, s3API, config.FileSelectors, backupConfig{}) + s3EventHandlerFactory := newS3ObjectProcessorFactory(metrics, s3API, config.FileSelectors, backupConfig{}) s3Poller := &s3PollerInput{ log: logp.NewLogger(inputName), config: config, metrics: metrics, s3: s3API, - client: client, + pipeline: pipeline, s3ObjectHandler: s3EventHandlerFactory, states: states, provider: "provider", diff --git a/x-pack/filebeat/input/awss3/input_integration_test.go b/x-pack/filebeat/input/awss3/input_integration_test.go index 88d81a9f0c8b..9303c5c72599 100644 --- a/x-pack/filebeat/input/awss3/input_integration_test.go +++ b/x-pack/filebeat/input/awss3/input_integration_test.go @@ -112,7 +112,7 @@ file_selectors: func makeTestConfigSQS(queueURL string) *conf.C { return conf.MustNewConfigFrom(fmt.Sprintf(`--- queue_url: %s -max_number_of_messages: 1 +number_of_workers: 1 visibility_timeout: 30s region: us-east-1 file_selectors: diff --git a/x-pack/filebeat/input/awss3/interfaces.go b/x-pack/filebeat/input/awss3/interfaces.go index 5e9eb13d243a..6a3b119303be 100644 --- a/x-pack/filebeat/input/awss3/interfaces.go +++ b/x-pack/filebeat/input/awss3/interfaces.go @@ -17,7 +17,6 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/elastic/beats/v7/libbeat/beat" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -41,25 +40,9 @@ import ( const s3RequestURLMetadataKey = `x-beat-s3-request-url` type sqsAPI interface 
{ - sqsReceiver - sqsDeleter - sqsVisibilityChanger - sqsAttributeGetter -} - -type sqsReceiver interface { ReceiveMessage(ctx context.Context, maxMessages int) ([]types.Message, error) -} - -type sqsDeleter interface { DeleteMessage(ctx context.Context, msg *types.Message) error -} - -type sqsVisibilityChanger interface { ChangeMessageVisibility(ctx context.Context, msg *types.Message, timeout time.Duration) error -} - -type sqsAttributeGetter interface { GetQueueAttributes(ctx context.Context, attr []types.QueueAttributeName) (map[string]string, error) } @@ -68,7 +51,7 @@ type sqsProcessor interface { // given message and is responsible for updating the message's visibility // timeout while it is being processed and for deleting it when processing // completes successfully. - ProcessSQS(ctx context.Context, msg *types.Message) error + ProcessSQS(ctx context.Context, msg *types.Message, eventCallback func(e beat.Event)) sqsProcessingResult } // ------ @@ -103,25 +86,18 @@ type s3ObjectHandlerFactory interface { // Create returns a new s3ObjectHandler that can be used to process the // specified S3 object. If the handler is not configured to process the // given S3 object (based on key name) then it will return nil. - Create(ctx context.Context, log *logp.Logger, client beat.Client, acker *awscommon.EventACKTracker, obj s3EventV2) s3ObjectHandler + Create(ctx context.Context, obj s3EventV2) s3ObjectHandler } type s3ObjectHandler interface { // ProcessS3Object downloads the S3 object, parses it, creates events, and - // publishes them. It returns when processing finishes or when it encounters - // an unrecoverable error. It does not wait for the events to be ACKed by - // the publisher before returning (use eventACKTracker's Wait() method to - // determine this). - ProcessS3Object() error + // passes to the given callback. It returns when processing finishes or + // when it encounters an unrecoverable error. 
+ ProcessS3Object(log *logp.Logger, eventCallback func(e beat.Event)) error // FinalizeS3Object finalizes processing of an S3 object after the current // batch is finished. FinalizeS3Object() error - - // Wait waits for every event published by ProcessS3Object() to be ACKed - // by the publisher before returning. Internally it uses the - // s3ObjectHandler eventACKTracker's Wait() method - Wait() } // ------ diff --git a/x-pack/filebeat/input/awss3/mock_interfaces_test.go b/x-pack/filebeat/input/awss3/mock_interfaces_test.go index ccae48a59b2f..086ca34136fd 100644 --- a/x-pack/filebeat/input/awss3/mock_interfaces_test.go +++ b/x-pack/filebeat/input/awss3/mock_interfaces_test.go @@ -18,7 +18,6 @@ import ( gomock "github.com/golang/mock/gomock" beat "github.com/elastic/beats/v7/libbeat/beat" - aws "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" logp "github.com/elastic/elastic-agent-libs/logp" ) @@ -103,156 +102,6 @@ func (mr *MockSQSAPIMockRecorder) ReceiveMessage(ctx, maxMessages interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReceiveMessage", reflect.TypeOf((*MockSQSAPI)(nil).ReceiveMessage), ctx, maxMessages) } -// MocksqsReceiver is a mock of sqsReceiver interface. -type MocksqsReceiver struct { - ctrl *gomock.Controller - recorder *MocksqsReceiverMockRecorder -} - -// MocksqsReceiverMockRecorder is the mock recorder for MocksqsReceiver. -type MocksqsReceiverMockRecorder struct { - mock *MocksqsReceiver -} - -// NewMocksqsReceiver creates a new mock instance. -func NewMocksqsReceiver(ctrl *gomock.Controller) *MocksqsReceiver { - mock := &MocksqsReceiver{ctrl: ctrl} - mock.recorder = &MocksqsReceiverMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MocksqsReceiver) EXPECT() *MocksqsReceiverMockRecorder { - return m.recorder -} - -// ReceiveMessage mocks base method. 
-func (m *MocksqsReceiver) ReceiveMessage(ctx context.Context, maxMessages int) ([]types.Message, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReceiveMessage", ctx, maxMessages) - ret0, _ := ret[0].([]types.Message) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReceiveMessage indicates an expected call of ReceiveMessage. -func (mr *MocksqsReceiverMockRecorder) ReceiveMessage(ctx, maxMessages interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReceiveMessage", reflect.TypeOf((*MocksqsReceiver)(nil).ReceiveMessage), ctx, maxMessages) -} - -// MocksqsDeleter is a mock of sqsDeleter interface. -type MocksqsDeleter struct { - ctrl *gomock.Controller - recorder *MocksqsDeleterMockRecorder -} - -// MocksqsDeleterMockRecorder is the mock recorder for MocksqsDeleter. -type MocksqsDeleterMockRecorder struct { - mock *MocksqsDeleter -} - -// NewMocksqsDeleter creates a new mock instance. -func NewMocksqsDeleter(ctrl *gomock.Controller) *MocksqsDeleter { - mock := &MocksqsDeleter{ctrl: ctrl} - mock.recorder = &MocksqsDeleterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MocksqsDeleter) EXPECT() *MocksqsDeleterMockRecorder { - return m.recorder -} - -// DeleteMessage mocks base method. -func (m *MocksqsDeleter) DeleteMessage(ctx context.Context, msg *types.Message) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteMessage", ctx, msg) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteMessage indicates an expected call of DeleteMessage. -func (mr *MocksqsDeleterMockRecorder) DeleteMessage(ctx, msg interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMessage", reflect.TypeOf((*MocksqsDeleter)(nil).DeleteMessage), ctx, msg) -} - -// MocksqsVisibilityChanger is a mock of sqsVisibilityChanger interface. 
-type MocksqsVisibilityChanger struct { - ctrl *gomock.Controller - recorder *MocksqsVisibilityChangerMockRecorder -} - -// MocksqsVisibilityChangerMockRecorder is the mock recorder for MocksqsVisibilityChanger. -type MocksqsVisibilityChangerMockRecorder struct { - mock *MocksqsVisibilityChanger -} - -// NewMocksqsVisibilityChanger creates a new mock instance. -func NewMocksqsVisibilityChanger(ctrl *gomock.Controller) *MocksqsVisibilityChanger { - mock := &MocksqsVisibilityChanger{ctrl: ctrl} - mock.recorder = &MocksqsVisibilityChangerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MocksqsVisibilityChanger) EXPECT() *MocksqsVisibilityChangerMockRecorder { - return m.recorder -} - -// ChangeMessageVisibility mocks base method. -func (m *MocksqsVisibilityChanger) ChangeMessageVisibility(ctx context.Context, msg *types.Message, timeout time.Duration) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ChangeMessageVisibility", ctx, msg, timeout) - ret0, _ := ret[0].(error) - return ret0 -} - -// ChangeMessageVisibility indicates an expected call of ChangeMessageVisibility. -func (mr *MocksqsVisibilityChangerMockRecorder) ChangeMessageVisibility(ctx, msg, timeout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeMessageVisibility", reflect.TypeOf((*MocksqsVisibilityChanger)(nil).ChangeMessageVisibility), ctx, msg, timeout) -} - -// MocksqsAttributeGetter is a mock of sqsAttributeGetter interface. -type MocksqsAttributeGetter struct { - ctrl *gomock.Controller - recorder *MocksqsAttributeGetterMockRecorder -} - -// MocksqsAttributeGetterMockRecorder is the mock recorder for MocksqsAttributeGetter. -type MocksqsAttributeGetterMockRecorder struct { - mock *MocksqsAttributeGetter -} - -// NewMocksqsAttributeGetter creates a new mock instance. 
-func NewMocksqsAttributeGetter(ctrl *gomock.Controller) *MocksqsAttributeGetter { - mock := &MocksqsAttributeGetter{ctrl: ctrl} - mock.recorder = &MocksqsAttributeGetterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MocksqsAttributeGetter) EXPECT() *MocksqsAttributeGetterMockRecorder { - return m.recorder -} - -// GetQueueAttributes mocks base method. -func (m *MocksqsAttributeGetter) GetQueueAttributes(ctx context.Context, attr []types.QueueAttributeName) (map[string]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetQueueAttributes", ctx, attr) - ret0, _ := ret[0].(map[string]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetQueueAttributes indicates an expected call of GetQueueAttributes. -func (mr *MocksqsAttributeGetterMockRecorder) GetQueueAttributes(ctx, attr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueueAttributes", reflect.TypeOf((*MocksqsAttributeGetter)(nil).GetQueueAttributes), ctx, attr) -} - // MockSQSProcessor is a mock of sqsProcessor interface. type MockSQSProcessor struct { ctrl *gomock.Controller @@ -277,17 +126,17 @@ func (m *MockSQSProcessor) EXPECT() *MockSQSProcessorMockRecorder { } // ProcessSQS mocks base method. -func (m *MockSQSProcessor) ProcessSQS(ctx context.Context, msg *types.Message) error { +func (m *MockSQSProcessor) ProcessSQS(ctx context.Context, msg *types.Message, eventCallback func(beat.Event)) sqsProcessingResult { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ProcessSQS", ctx, msg) - ret0, _ := ret[0].(error) + ret := m.ctrl.Call(m, "ProcessSQS", ctx, msg, eventCallback) + ret0, _ := ret[0].(sqsProcessingResult) return ret0 } // ProcessSQS indicates an expected call of ProcessSQS. 
-func (mr *MockSQSProcessorMockRecorder) ProcessSQS(ctx, msg interface{}) *gomock.Call { +func (mr *MockSQSProcessorMockRecorder) ProcessSQS(ctx, msg, eventCallback interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessSQS", reflect.TypeOf((*MockSQSProcessor)(nil).ProcessSQS), ctx, msg) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessSQS", reflect.TypeOf((*MockSQSProcessor)(nil).ProcessSQS), ctx, msg, eventCallback) } // MockS3API is a mock of s3API interface. @@ -581,17 +430,17 @@ func (m *MockS3ObjectHandlerFactory) EXPECT() *MockS3ObjectHandlerFactoryMockRec } // Create mocks base method. -func (m *MockS3ObjectHandlerFactory) Create(ctx context.Context, log *logp.Logger, client beat.Client, acker *aws.EventACKTracker, obj s3EventV2) s3ObjectHandler { +func (m *MockS3ObjectHandlerFactory) Create(ctx context.Context, obj s3EventV2) s3ObjectHandler { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Create", ctx, log, client, acker, obj) + ret := m.ctrl.Call(m, "Create", ctx, obj) ret0, _ := ret[0].(s3ObjectHandler) return ret0 } // Create indicates an expected call of Create. -func (mr *MockS3ObjectHandlerFactoryMockRecorder) Create(ctx, log, client, acker, obj interface{}) *gomock.Call { +func (mr *MockS3ObjectHandlerFactoryMockRecorder) Create(ctx, obj interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockS3ObjectHandlerFactory)(nil).Create), ctx, log, client, acker, obj) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockS3ObjectHandlerFactory)(nil).Create), ctx, obj) } // MockS3ObjectHandler is a mock of s3ObjectHandler interface. @@ -632,27 +481,15 @@ func (mr *MockS3ObjectHandlerMockRecorder) FinalizeS3Object() *gomock.Call { } // ProcessS3Object mocks base method. 
-func (m *MockS3ObjectHandler) ProcessS3Object() error { +func (m *MockS3ObjectHandler) ProcessS3Object(log *logp.Logger, eventCallback func(beat.Event)) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ProcessS3Object") + ret := m.ctrl.Call(m, "ProcessS3Object", log, eventCallback) ret0, _ := ret[0].(error) return ret0 } // ProcessS3Object indicates an expected call of ProcessS3Object. -func (mr *MockS3ObjectHandlerMockRecorder) ProcessS3Object() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessS3Object", reflect.TypeOf((*MockS3ObjectHandler)(nil).ProcessS3Object)) -} - -// Wait mocks base method. -func (m *MockS3ObjectHandler) Wait() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Wait") -} - -// Wait indicates an expected call of Wait. -func (mr *MockS3ObjectHandlerMockRecorder) Wait() *gomock.Call { +func (mr *MockS3ObjectHandlerMockRecorder) ProcessS3Object(log, eventCallback interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Wait", reflect.TypeOf((*MockS3ObjectHandler)(nil).Wait)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessS3Object", reflect.TypeOf((*MockS3ObjectHandler)(nil).ProcessS3Object), log, eventCallback) } diff --git a/x-pack/filebeat/input/awss3/s3.go b/x-pack/filebeat/input/awss3/s3.go index d611470ec80c..9901d5fe41d4 100644 --- a/x-pack/filebeat/input/awss3/s3.go +++ b/x-pack/filebeat/input/awss3/s3.go @@ -14,7 +14,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/elastic/beats/v7/libbeat/beat" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" ) func createS3API(ctx context.Context, config config, awsConfig awssdk.Config) (*awsS3API, error) { @@ -32,9 +31,9 @@ func createS3API(ctx context.Context, config config, awsConfig awssdk.Config) (* return newAWSs3API(s3Client), nil } -func createPipelineClient(pipeline beat.Pipeline) (beat.Client, error) { +func createPipelineClient(pipeline 
beat.Pipeline, acks *awsACKHandler) (beat.Client, error) { return pipeline.ConnectWith(beat.ClientConfig{ - EventListener: awscommon.NewEventACKHandler(), + EventListener: acks.pipelineEventListener(), Processing: beat.ProcessingConfig{ // This input only produces events with basic types so normalization // is not required. @@ -117,5 +116,6 @@ type nonAWSBucketResolver struct { } func (n nonAWSBucketResolver) ResolveEndpoint(region string, options s3.EndpointResolverOptions) (awssdk.Endpoint, error) { + //nolint:staticcheck // haven't migrated to the new interface yet return awssdk.Endpoint{URL: n.endpoint, SigningRegion: region, HostnameImmutable: true, Source: awssdk.EndpointSourceCustom}, nil } diff --git a/x-pack/filebeat/input/awss3/s3_input.go b/x-pack/filebeat/input/awss3/s3_input.go index bd1e8f7700e5..c3a83c284a2f 100644 --- a/x-pack/filebeat/input/awss3/s3_input.go +++ b/x-pack/filebeat/input/awss3/s3_input.go @@ -17,7 +17,6 @@ import ( v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/backoff" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/go-concert/timed" ) @@ -28,23 +27,17 @@ var readerLoopMaxCircuitBreaker = 10 type s3PollerInput struct { log *logp.Logger + pipeline beat.Pipeline config config awsConfig awssdk.Config store beater.StateStore provider string s3 s3API metrics *inputMetrics - client beat.Client s3ObjectHandler s3ObjectHandlerFactory states *states } -// s3FetchTask contains metadata for one S3 object that a worker should fetch. 
-type s3FetchTask struct { - s3ObjectHandler s3ObjectHandler - objectState state -} - func newS3PollerInput( config config, awsConfig awssdk.Config, @@ -69,6 +62,7 @@ func (in *s3PollerInput) Run( pipeline beat.Pipeline, ) error { in.log = inputContext.Logger.Named("s3") + in.pipeline = pipeline var err error // Load the persistent S3 polling state. @@ -78,24 +72,16 @@ func (in *s3PollerInput) Run( } defer in.states.Close() - // Create client for publishing events and receive notification of their ACKs. - in.client, err = createPipelineClient(pipeline) - if err != nil { - return fmt.Errorf("failed to create pipeline client: %w", err) - } - defer in.client.Close() - ctx := v2.GoContextFromCanceler(inputContext.Cancelation) in.s3, err = createS3API(ctx, in.config, in.awsConfig) if err != nil { return fmt.Errorf("failed to create S3 API: %w", err) } - in.metrics = newInputMetrics(inputContext.ID, nil, in.config.MaxNumberOfMessages) + in.metrics = newInputMetrics(inputContext.ID, nil, in.config.NumberOfWorkers) defer in.metrics.Close() in.s3ObjectHandler = newS3ObjectProcessorFactory( - in.log, in.metrics, in.s3, in.config.getFileSelectors(), @@ -117,7 +103,7 @@ func (in *s3PollerInput) run(ctx context.Context) { func (in *s3PollerInput) runPoll(ctx context.Context) { var workerWg sync.WaitGroup - workChan := make(chan *s3FetchTask) + workChan := make(chan state) // Start the worker goroutines to listen on the work channel for i := 0; i < in.config.NumberOfWorkers; i++ { @@ -133,15 +119,37 @@ func (in *s3PollerInput) runPoll(ctx context.Context) { workerWg.Wait() } -func (in *s3PollerInput) workerLoop(ctx context.Context, workChan <-chan *s3FetchTask) { +func (in *s3PollerInput) workerLoop(ctx context.Context, workChan <-chan state) { + acks := newAWSACKHandler() + // Create client for publishing events and receive notification of their ACKs. 
+ client, err := createPipelineClient(in.pipeline, acks) + if err != nil { + in.log.Errorf("failed to create pipeline client: %v", err.Error()) + return + } + defer client.Close() + defer acks.Close() + rateLimitWaiter := backoff.NewEqualJitterBackoff(ctx.Done(), 1, 120) - for s3ObjectPayload := range workChan { - objHandler := s3ObjectPayload.s3ObjectHandler - state := s3ObjectPayload.objectState + for _state := range workChan { + state := _state + event := in.s3EventForState(state) + + objHandler := in.s3ObjectHandler.Create(ctx, event) + if objHandler == nil { + in.log.Debugw("empty s3 processor (no matching reader configs).", "state", state) + continue + } // Process S3 object (download, parse, create events). - err := objHandler.ProcessS3Object() + publishCount := 0 + err := objHandler.ProcessS3Object(in.log, func(e beat.Event) { + in.metrics.s3EventsCreatedTotal.Inc() + client.Publish(e) + publishCount++ + }) + in.metrics.s3EventsPerObject.Update(int64(publishCount)) if errors.Is(err, errS3DownloadFailed) { // Download errors are ephemeral. Add a backoff delay, then skip to the // next iteration so we don't mark the object as permanently failed. @@ -151,9 +159,7 @@ func (in *s3PollerInput) workerLoop(ctx context.Context, workChan <-chan *s3Fetc // Reset the rate limit delay on results that aren't download errors. rateLimitWaiter.Reset() - // Wait for downloaded objects to be ACKed. - objHandler.Wait() - + // Update state, but don't persist it until this object is acknowledged. 
if err != nil { in.log.Errorf("failed processing S3 event for object key %q in bucket %q: %v", state.Key, state.Bucket, err.Error()) @@ -164,22 +170,20 @@ func (in *s3PollerInput) workerLoop(ctx context.Context, workChan <-chan *s3Fetc state.Stored = true } - // Persist the result, report any errors - err = in.states.AddState(state) - if err != nil { - in.log.Errorf("saving completed object state: %v", err.Error()) - } - - // Metrics - in.metrics.s3ObjectsAckedTotal.Inc() + // Add the cleanup handling to the acks helper + acks.Add(publishCount, func() { + err := in.states.AddState(state) + if err != nil { + in.log.Errorf("saving completed object state: %v", err.Error()) + } - if finalizeErr := objHandler.FinalizeS3Object(); finalizeErr != nil { - in.log.Errorf("failed finalizing objects from S3 bucket (manual cleanup is required): %w", finalizeErr) - } + // Metrics + in.metrics.s3ObjectsAckedTotal.Inc() + }) } } -func (in *s3PollerInput) readerLoop(ctx context.Context, workChan chan<- *s3FetchTask) { +func (in *s3PollerInput) readerLoop(ctx context.Context, workChan chan<- state) { defer close(workChan) bucketName := getBucketNameFromARN(in.config.getBucketARN()) @@ -220,31 +224,19 @@ func (in *s3PollerInput) readerLoop(ctx context.Context, workChan chan<- *s3Fetc continue } - s3Processor := in.createS3ObjectProcessor(ctx, state) - if s3Processor == nil { - in.log.Debugw("empty s3 processor.", "state", state) - continue - } - - workChan <- &s3FetchTask{ - s3ObjectHandler: s3Processor, - objectState: state, - } + workChan <- state in.metrics.s3ObjectsProcessedTotal.Inc() } } } -func (in *s3PollerInput) createS3ObjectProcessor(ctx context.Context, state state) s3ObjectHandler { +func (in *s3PollerInput) s3EventForState(state state) s3EventV2 { event := s3EventV2{} event.AWSRegion = in.awsConfig.Region event.Provider = in.provider event.S3.Bucket.Name = state.Bucket event.S3.Bucket.ARN = in.config.getBucketARN() event.S3.Object.Key = state.Key - - acker := 
awscommon.NewEventACKTracker(ctx) - - return in.s3ObjectHandler.Create(ctx, in.log, in.client, acker, event) + return event } diff --git a/x-pack/filebeat/input/awss3/s3_objects.go b/x-pack/filebeat/input/awss3/s3_objects.go index 82a9e817bc68..93219d9a6408 100644 --- a/x-pack/filebeat/input/awss3/s3_objects.go +++ b/x-pack/filebeat/input/awss3/s3_objects.go @@ -25,30 +25,48 @@ import ( "github.com/elastic/beats/v7/libbeat/reader" "github.com/elastic/beats/v7/libbeat/reader/readfile" "github.com/elastic/beats/v7/libbeat/reader/readfile/encoding" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) -const ( - contentTypeJSON = "application/json" - contentTypeNDJSON = "application/x-ndjson" -) - type s3ObjectProcessorFactory struct { - log *logp.Logger metrics *inputMetrics s3 s3API fileSelectors []fileSelectorConfig backupConfig backupConfig } +type s3ObjectProcessor struct { + *s3ObjectProcessorFactory + + ctx context.Context + eventCallback func(beat.Event) + readerConfig *readerConfig // Config about how to process the object. + s3Obj s3EventV2 // S3 object information. + s3ObjHash string + s3RequestURL string + + s3Metadata map[string]interface{} // S3 object metadata. +} + +type s3DownloadedObject struct { + body io.ReadCloser + length int64 + contentType string + metadata map[string]interface{} +} + +const ( + contentTypeJSON = "application/json" + contentTypeNDJSON = "application/x-ndjson" +) + // errS3DownloadFailed reports problems downloading an S3 object. Download errors // should never treated as permanent, they are just an indication to apply a // retry backoff until the connection is healthy again. 
var errS3DownloadFailed = errors.New("S3 download failure") -func newS3ObjectProcessorFactory(log *logp.Logger, metrics *inputMetrics, s3 s3API, sel []fileSelectorConfig, backupConfig backupConfig) *s3ObjectProcessorFactory { +func newS3ObjectProcessorFactory(metrics *inputMetrics, s3 s3API, sel []fileSelectorConfig, backupConfig backupConfig) *s3ObjectProcessorFactory { if metrics == nil { // Metrics are optional. Initialize a stub. metrics = newInputMetrics("", nil, 0) @@ -59,7 +77,6 @@ func newS3ObjectProcessorFactory(log *logp.Logger, metrics *inputMetrics, s3 s3A } } return &s3ObjectProcessorFactory{ - log: log, metrics: metrics, s3: s3, fileSelectors: sel, @@ -78,64 +95,33 @@ func (f *s3ObjectProcessorFactory) findReaderConfig(key string) *readerConfig { // Create returns a new s3ObjectProcessor. It returns nil when no file selectors // match the S3 object key. -func (f *s3ObjectProcessorFactory) Create(ctx context.Context, log *logp.Logger, client beat.Client, ack *awscommon.EventACKTracker, obj s3EventV2) s3ObjectHandler { - log = log.With( - "bucket_arn", obj.S3.Bucket.Name, - "object_key", obj.S3.Object.Key) - +func (f *s3ObjectProcessorFactory) Create(ctx context.Context, obj s3EventV2) s3ObjectHandler { readerConfig := f.findReaderConfig(obj.S3.Object.Key) if readerConfig == nil { - log.Debug("Skipping S3 object processing. No file_selectors are a match.") + // No file_selectors are a match, skip. 
return nil } return &s3ObjectProcessor{ s3ObjectProcessorFactory: f, - log: log, ctx: ctx, - publisher: client, - acker: ack, readerConfig: readerConfig, s3Obj: obj, s3ObjHash: s3ObjectHash(obj), } } -// s3DownloadedObject encapsulate downloaded s3 object for internal processing -type s3DownloadedObject struct { - body io.ReadCloser - length int64 - contentType string - metadata map[string]interface{} -} - -type s3ObjectProcessor struct { - *s3ObjectProcessorFactory - - log *logp.Logger - ctx context.Context - publisher beat.Client - acker *awscommon.EventACKTracker // ACKer tied to the SQS message (multiple S3 readers share an ACKer when the S3 notification event contains more than one S3 object). - readerConfig *readerConfig // Config about how to process the object. - s3Obj s3EventV2 // S3 object information. - s3ObjHash string - s3RequestURL string - eventCount int64 - - s3Metadata map[string]interface{} // S3 object metadata. -} - -func (p *s3ObjectProcessor) Wait() { - p.acker.Wait() -} - -func (p *s3ObjectProcessor) ProcessS3Object() error { +func (p *s3ObjectProcessor) ProcessS3Object(log *logp.Logger, eventCallback func(e beat.Event)) error { if p == nil { return nil } + p.eventCallback = eventCallback + log = log.With( + "bucket_arn", p.s3Obj.S3.Bucket.Name, + "object_key", p.s3Obj.S3.Object.Key) // Metrics and Logging - p.log.Debug("Begin S3 object processing.") + log.Debug("Begin S3 object processing.") p.metrics.s3ObjectsRequestedTotal.Inc() p.metrics.s3ObjectsInflight.Inc() start := time.Now() @@ -143,7 +129,7 @@ func (p *s3ObjectProcessor) ProcessS3Object() error { elapsed := time.Since(start) p.metrics.s3ObjectsInflight.Dec() p.metrics.s3ObjectProcessingTime.Update(elapsed.Nanoseconds()) - p.log.Debugw("End S3 object processing.", "elapsed_time_ns", elapsed) + log.Debugw("End S3 object processing.", "elapsed_time_ns", elapsed) }() // Request object (download). 
@@ -181,7 +167,7 @@ func (p *s3ObjectProcessor) ProcessS3Object() error { for dec.next() { val, err := dec.decodeValue() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } break @@ -191,7 +177,8 @@ func (p *s3ObjectProcessor) ProcessS3Object() error { return err } evt := p.createEvent(string(data), evtOffset) - p.publish(p.acker, &evt) + + p.eventCallback(evt) } case decoder: @@ -226,7 +213,6 @@ func (p *s3ObjectProcessor) ProcessS3Object() error { time.Since(start).Nanoseconds(), err) } - p.metrics.s3EventsPerObject.Update(p.eventCount) return nil } @@ -298,7 +284,7 @@ func (p *s3ObjectProcessor) readJSON(r io.Reader) error { data, _ := item.MarshalJSON() evt := p.createEvent(string(data), offset) - p.publish(p.acker, &evt) + p.eventCallback(evt) } return nil @@ -333,7 +319,7 @@ func (p *s3ObjectProcessor) readJSONSlice(r io.Reader, evtOffset int64) (int64, data, _ := item.MarshalJSON() evt := p.createEvent(string(data), evtOffset) - p.publish(p.acker, &evt) + p.eventCallback(evt) evtOffset++ } @@ -378,7 +364,7 @@ func (p *s3ObjectProcessor) splitEventList(key string, raw json.RawMessage, offs data, _ := item.MarshalJSON() p.s3ObjHash = objHash evt := p.createEvent(string(data), offset+arrayOffset) - p.publish(p.acker, &evt) + p.eventCallback(evt) } return nil @@ -418,7 +404,7 @@ func (p *s3ObjectProcessor) readFile(r io.Reader) error { event := p.createEvent(string(message.Content), offset) event.Fields.DeepUpdate(message.Fields) offset += int64(message.Bytes) - p.publish(p.acker, &event) + p.eventCallback(event) } if errors.Is(err, io.EOF) { @@ -433,15 +419,6 @@ func (p *s3ObjectProcessor) readFile(r io.Reader) error { return nil } -// publish the generated event and perform necessary tracking -func (p *s3ObjectProcessor) publish(ack *awscommon.EventACKTracker, event *beat.Event) { - ack.Add() - event.Private = ack - p.eventCount += 1 - p.metrics.s3EventsCreatedTotal.Inc() - p.publisher.Publish(*event) -} - func (p 
*s3ObjectProcessor) createEvent(message string, offset int64) beat.Event { event := beat.Event{ Timestamp: time.Now().UTC(), diff --git a/x-pack/filebeat/input/awss3/s3_objects_test.go b/x-pack/filebeat/input/awss3/s3_objects_test.go index 635955ed8c42..d20d81ced6c8 100644 --- a/x-pack/filebeat/input/awss3/s3_objects_test.go +++ b/x-pack/filebeat/input/awss3/s3_objects_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" ) @@ -148,7 +147,6 @@ func TestS3ObjectProcessor(t *testing.T) { ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() mockS3API := NewMockS3API(ctrl) - mockPublisher := NewMockBeatClient(ctrl) s3Event := newS3Event("log.txt") @@ -156,9 +154,8 @@ func TestS3ObjectProcessor(t *testing.T) { GetObject(gomock.Any(), gomock.Eq("us-east-1"), gomock.Eq(s3Event.S3.Bucket.Name), gomock.Eq(s3Event.S3.Object.Key)). 
Return(nil, errFakeConnectivityFailure) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}) - ack := awscommon.NewEventACKTracker(ctx) - err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).ProcessS3Object() + s3ObjProc := newS3ObjectProcessorFactory(nil, mockS3API, nil, backupConfig{}) + err := s3ObjProc.Create(ctx, s3Event).ProcessS3Object(logp.NewLogger(inputName), func(_ beat.Event) {}) require.Error(t, err) assert.True(t, errors.Is(err, errS3DownloadFailed), "expected errS3DownloadFailed") }) @@ -170,7 +167,6 @@ func TestS3ObjectProcessor(t *testing.T) { ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() mockS3API := NewMockS3API(ctrl) - mockPublisher := NewMockBeatClient(ctrl) s3Event := newS3Event("log.txt") @@ -178,9 +174,8 @@ func TestS3ObjectProcessor(t *testing.T) { GetObject(gomock.Any(), gomock.Eq("us-east-1"), gomock.Eq(s3Event.S3.Bucket.Name), gomock.Eq(s3Event.S3.Object.Key)). Return(nil, nil) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}) - ack := awscommon.NewEventACKTracker(ctx) - err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).ProcessS3Object() + s3ObjProc := newS3ObjectProcessorFactory(nil, mockS3API, nil, backupConfig{}) + err := s3ObjProc.Create(ctx, s3Event).ProcessS3Object(logp.NewLogger(inputName), func(_ beat.Event) {}) require.Error(t, err) }) @@ -191,23 +186,20 @@ func TestS3ObjectProcessor(t *testing.T) { ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() mockS3API := NewMockS3API(ctrl) - mockPublisher := NewMockBeatClient(ctrl) s3Event, s3Resp := newS3Object(t, "testdata/log.txt", "") - var events []beat.Event gomock.InOrder( mockS3API.EXPECT(). GetObject(gomock.Any(), gomock.Eq("us-east-1"), gomock.Eq(s3Event.S3.Bucket.Name), gomock.Eq(s3Event.S3.Object.Key)). Return(s3Resp, nil), - mockPublisher.EXPECT(). - Publish(gomock.Any()). 
- Do(func(event beat.Event) { events = append(events, event) }). - Times(2), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupConfig{}) - ack := awscommon.NewEventACKTracker(ctx) - err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).ProcessS3Object() + var events []beat.Event + s3ObjProc := newS3ObjectProcessorFactory(nil, mockS3API, nil, backupConfig{}) + err := s3ObjProc.Create(ctx, s3Event).ProcessS3Object(logp.NewLogger(inputName), func(event beat.Event) { + events = append(events, event) + }) + assert.Equal(t, 2, len(events)) require.NoError(t, err) }) @@ -218,7 +210,6 @@ func TestS3ObjectProcessor(t *testing.T) { ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() mockS3API := NewMockS3API(ctrl) - mockPublisher := NewMockBeatClient(ctrl) s3Event, _ := newS3Object(t, "testdata/log.txt", "") backupCfg := backupConfig{ @@ -231,9 +222,8 @@ func TestS3ObjectProcessor(t *testing.T) { Return(nil, nil), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg) - ack := awscommon.NewEventACKTracker(ctx) - err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).FinalizeS3Object() + s3ObjProc := newS3ObjectProcessorFactory(nil, mockS3API, nil, backupCfg) + err := s3ObjProc.Create(ctx, s3Event).FinalizeS3Object() require.NoError(t, err) }) @@ -244,7 +234,6 @@ func TestS3ObjectProcessor(t *testing.T) { ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() mockS3API := NewMockS3API(ctrl) - mockPublisher := NewMockBeatClient(ctrl) s3Event, _ := newS3Object(t, "testdata/log.txt", "") backupCfg := backupConfig{ @@ -261,9 +250,8 @@ func TestS3ObjectProcessor(t *testing.T) { Return(nil, nil), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg) - ack := awscommon.NewEventACKTracker(ctx) - err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, 
ack, s3Event).FinalizeS3Object() + s3ObjProc := newS3ObjectProcessorFactory(nil, mockS3API, nil, backupCfg) + err := s3ObjProc.Create(ctx, s3Event).FinalizeS3Object() require.NoError(t, err) }) @@ -274,7 +262,6 @@ func TestS3ObjectProcessor(t *testing.T) { ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() mockS3API := NewMockS3API(ctrl) - mockPublisher := NewMockBeatClient(ctrl) s3Event, _ := newS3Object(t, "testdata/log.txt", "") backupCfg := backupConfig{ @@ -288,9 +275,8 @@ func TestS3ObjectProcessor(t *testing.T) { Return(nil, nil), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, nil, backupCfg) - ack := awscommon.NewEventACKTracker(ctx) - err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).FinalizeS3Object() + s3ObjProc := newS3ObjectProcessorFactory(nil, mockS3API, nil, backupCfg) + err := s3ObjProc.Create(ctx, s3Event).FinalizeS3Object() require.NoError(t, err) }) @@ -320,7 +306,6 @@ func _testProcessS3Object(t testing.TB, file, contentType string, numEvents int, ctrl, ctx := gomock.WithContext(ctx, t) defer ctrl.Finish() mockS3API := NewMockS3API(ctrl) - mockPublisher := NewMockBeatClient(ctrl) s3Event, s3Resp := newS3Object(t, file, contentType) var events []beat.Event @@ -328,20 +313,16 @@ func _testProcessS3Object(t testing.TB, file, contentType string, numEvents int, mockS3API.EXPECT(). GetObject(gomock.Any(), gomock.Eq("us-east-1"), gomock.Eq(s3Event.S3.Bucket.Name), gomock.Eq(s3Event.S3.Object.Key)). Return(s3Resp, nil), - mockPublisher.EXPECT(). - Publish(gomock.Any()). - Do(func(event beat.Event) { events = append(events, event) }). 
- Times(numEvents), ) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, selectors, backupConfig{}) - ack := awscommon.NewEventACKTracker(ctx) - err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), mockPublisher, ack, s3Event).ProcessS3Object() + s3ObjProc := newS3ObjectProcessorFactory(nil, mockS3API, selectors, backupConfig{}) + err := s3ObjProc.Create(ctx, s3Event).ProcessS3Object( + logp.NewLogger(inputName), + func(event beat.Event) { events = append(events, event) }) if !expectErr { require.NoError(t, err) assert.Equal(t, numEvents, len(events)) - assert.EqualValues(t, numEvents, ack.PendingACKs) } else { require.Error(t, err) } diff --git a/x-pack/filebeat/input/awss3/s3_test.go b/x-pack/filebeat/input/awss3/s3_test.go index 9c6099e775ae..b0b19d828318 100644 --- a/x-pack/filebeat/input/awss3/s3_test.go +++ b/x-pack/filebeat/input/awss3/s3_test.go @@ -36,7 +36,7 @@ func TestS3Poller(t *testing.T) { defer ctrl.Finish() mockAPI := NewMockS3API(ctrl) mockPager := NewMockS3Pager(ctrl) - mockPublisher := NewMockBeatClient(ctrl) + pipeline := newFakePipeline() gomock.InOrder( mockAPI.EXPECT(). @@ -126,7 +126,7 @@ func TestS3Poller(t *testing.T) { GetObject(gomock.Any(), gomock.Eq(""), gomock.Eq(bucket), gomock.Eq("2024-02-08T08:35:00+00:02.json.gz")). 
Return(nil, errFakeConnectivityFailure) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockAPI, nil, backupConfig{}) + s3ObjProc := newS3ObjectProcessorFactory(nil, mockAPI, nil, backupConfig{}) states, err := newStates(nil, store) require.NoError(t, err, "states creation must succeed") poller := &s3PollerInput{ @@ -139,7 +139,7 @@ func TestS3Poller(t *testing.T) { RegionName: "region", }, s3: mockAPI, - client: mockPublisher, + pipeline: pipeline, s3ObjectHandler: s3ObjProc, states: states, provider: "provider", @@ -162,7 +162,7 @@ func TestS3Poller(t *testing.T) { mockS3 := NewMockS3API(ctrl) mockErrorPager := NewMockS3Pager(ctrl) mockSuccessPager := NewMockS3Pager(ctrl) - mockPublisher := NewMockBeatClient(ctrl) + pipeline := newFakePipeline() gomock.InOrder( // Initial ListObjectPaginator gets an error. @@ -264,7 +264,7 @@ func TestS3Poller(t *testing.T) { GetObject(gomock.Any(), gomock.Eq(""), gomock.Eq(bucket), gomock.Eq("key5")). Return(nil, errFakeConnectivityFailure) - s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3, nil, backupConfig{}) + s3ObjProc := newS3ObjectProcessorFactory(nil, mockS3, nil, backupConfig{}) states, err := newStates(nil, store) require.NoError(t, err, "states creation must succeed") poller := &s3PollerInput{ @@ -277,7 +277,7 @@ func TestS3Poller(t *testing.T) { RegionName: "region", }, s3: mockS3, - client: mockPublisher, + pipeline: pipeline, s3ObjectHandler: s3ObjProc, states: states, provider: "provider", diff --git a/x-pack/filebeat/input/awss3/sqs_input.go b/x-pack/filebeat/input/awss3/sqs_input.go index a92319cbe192..a4308af45a80 100644 --- a/x-pack/filebeat/input/awss3/sqs_input.go +++ b/x-pack/filebeat/input/awss3/sqs_input.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "sync" - "time" awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -29,6 +28,10 @@ type sqsReaderInput struct { log *logp.Logger metrics *inputMetrics + // The Beats 
pipeline, used to create clients for event publication when + // creating the worker goroutines. + pipeline beat.Pipeline + // The expected region based on the queue URL detectedRegion string @@ -46,7 +49,7 @@ func newSQSReaderInput(config config, awsConfig awssdk.Config) *sqsReaderInput { return &sqsReaderInput{ config: config, awsConfig: awsConfig, - workRequestChan: make(chan struct{}, config.MaxNumberOfMessages), + workRequestChan: make(chan struct{}, config.NumberOfWorkers), workResponseChan: make(chan types.Message), } } @@ -83,6 +86,7 @@ func (in *sqsReaderInput) setup( pipeline beat.Pipeline, ) error { in.log = inputContext.Logger.With("queue_url", in.config.QueueURL) + in.pipeline = pipeline in.detectedRegion = getRegionFromQueueURL(in.config.QueueURL, in.config.AWSConfig.Endpoint) if in.config.RegionName != "" { @@ -105,10 +109,10 @@ func (in *sqsReaderInput) setup( in.s3 = newAWSs3API(s3.NewFromConfig(in.awsConfig, in.config.s3ConfigModifier)) - in.metrics = newInputMetrics(inputContext.ID, nil, in.config.MaxNumberOfMessages) + in.metrics = newInputMetrics(inputContext.ID, nil, in.config.NumberOfWorkers) var err error - in.msgHandler, err = in.createEventProcessor(pipeline) + in.msgHandler, err = in.createEventProcessor() if err != nil { return fmt.Errorf("failed to initialize sqs reader: %w", err) } @@ -161,42 +165,87 @@ func (in *sqsReaderInput) readerLoop(ctx context.Context) { } } -func (in *sqsReaderInput) workerLoop(ctx context.Context) { +type sqsWorker struct { + input *sqsReaderInput + client beat.Client + ackHandler *awsACKHandler +} + +func (in *sqsReaderInput) newSQSWorker() (*sqsWorker, error) { + // Create a pipeline client scoped to this worker. + ackHandler := newAWSACKHandler() + client, err := in.pipeline.ConnectWith(beat.ClientConfig{ + EventListener: ackHandler.pipelineEventListener(), + Processing: beat.ProcessingConfig{ + // This input only produces events with basic types so normalization + // is not required. 
+ EventNormalization: boolPtr(false), + }, + }) + if err != nil { + return nil, fmt.Errorf("connecting to pipeline: %w", err) + } + return &sqsWorker{ + input: in, + client: client, + ackHandler: ackHandler, + }, nil +} + +func (w *sqsWorker) run(ctx context.Context) { + defer w.client.Close() + defer w.ackHandler.Close() + for ctx.Err() == nil { // Send a work request select { case <-ctx.Done(): // Shutting down return - case in.workRequestChan <- struct{}{}: + case w.input.workRequestChan <- struct{}{}: } // The request is sent, wait for a response select { case <-ctx.Done(): return - case msg := <-in.workResponseChan: - start := time.Now() - - id := in.metrics.beginSQSWorker() - if err := in.msgHandler.ProcessSQS(ctx, &msg); err != nil { - in.log.Warnw("Failed processing SQS message.", - "error", err, - "message_id", *msg.MessageId, - "elapsed_time_ns", time.Since(start)) - } - in.metrics.endSQSWorker(id) + case msg := <-w.input.workResponseChan: + w.processMessage(ctx, msg) } } } +func (w *sqsWorker) processMessage(ctx context.Context, msg types.Message) { + publishCount := 0 + id := w.input.metrics.beginSQSWorker() + result := w.input.msgHandler.ProcessSQS(ctx, &msg, func(e beat.Event) { + w.client.Publish(e) + publishCount++ + }) + + if publishCount == 0 { + // No events made it through (probably an error state), wrap up immediately + result.Done() + } else { + // Add this result's Done callback to the pending ACKs list + w.ackHandler.Add(publishCount, result.Done) + } + + w.input.metrics.endSQSWorker(id) +} + func (in *sqsReaderInput) startWorkers(ctx context.Context) { // Start the worker goroutines that will fetch messages via workRequestChan // and workResponseChan until the input shuts down. 
- for i := 0; i < in.config.MaxNumberOfMessages; i++ { + for i := 0; i < in.config.NumberOfWorkers; i++ { in.workerWg.Add(1) go func() { defer in.workerWg.Done() - in.workerLoop(ctx) + worker, err := in.newSQSWorker() + if err != nil { + in.log.Error(err) + return + } + go worker.run(ctx) }() } } @@ -209,7 +258,7 @@ func (in *sqsReaderInput) logConfigSummary() { log.Warnf("configured region disagrees with queue_url region (%q != %q): using %q", in.awsConfig.Region, in.detectedRegion, in.awsConfig.Region) } log.Infof("AWS SQS visibility_timeout is set to %v.", in.config.VisibilityTimeout) - log.Infof("AWS SQS max_number_of_messages is set to %v.", in.config.MaxNumberOfMessages) + log.Infof("AWS SQS number_of_workers is set to %v.", in.config.NumberOfWorkers) if in.config.BackupConfig.GetBucketName() != "" { log.Warnf("You have the backup_to_bucket functionality activated with SQS. Please make sure to set appropriate destination buckets " + @@ -217,15 +266,15 @@ func (in *sqsReaderInput) logConfigSummary() { } } -func (in *sqsReaderInput) createEventProcessor(pipeline beat.Pipeline) (sqsProcessor, error) { +func (in *sqsReaderInput) createEventProcessor() (sqsProcessor, error) { fileSelectors := in.config.getFileSelectors() - s3EventHandlerFactory := newS3ObjectProcessorFactory(in.log.Named("s3"), in.metrics, in.s3, fileSelectors, in.config.BackupConfig) + s3EventHandlerFactory := newS3ObjectProcessorFactory(in.metrics, in.s3, fileSelectors, in.config.BackupConfig) script, err := newScriptFromConfig(in.log.Named("sqs_script"), in.config.SQSScript) if err != nil { return nil, err } - return newSQSS3EventProcessor(in.log.Named("sqs_s3_event"), in.metrics, in.sqs, script, in.config.VisibilityTimeout, in.config.SQSMaxReceiveCount, pipeline, s3EventHandlerFactory), nil + return newSQSS3EventProcessor(in.log.Named("sqs_s3_event"), in.metrics, in.sqs, script, in.config.VisibilityTimeout, in.config.SQSMaxReceiveCount, s3EventHandlerFactory), nil } // Read all pending 
requests and return their count. If block is true, diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event.go b/x-pack/filebeat/input/awss3/sqs_s3_event.go index a489f6a7f72e..884cf7adbbce 100644 --- a/x-pack/filebeat/input/awss3/sqs_s3_event.go +++ b/x-pack/filebeat/input/awss3/sqs_s3_event.go @@ -20,7 +20,6 @@ import ( "go.uber.org/multierr" "github.com/elastic/beats/v7/libbeat/beat" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" "github.com/elastic/elastic-agent-libs/logp" ) @@ -117,11 +116,10 @@ type eventBridgeEvent struct { } type sqsS3EventProcessor struct { - s3ObjectHandler s3ObjectHandlerFactory + s3HandlerFactory s3ObjectHandlerFactory sqsVisibilityTimeout time.Duration maxReceiveCount int sqs sqsAPI - pipeline beat.Pipeline // Pipeline creates clients for publishing events. log *logp.Logger warnOnce sync.Once metrics *inputMetrics @@ -135,7 +133,6 @@ func newSQSS3EventProcessor( script *script, sqsVisibilityTimeout time.Duration, maxReceiveCount int, - pipeline beat.Pipeline, s3 s3ObjectHandlerFactory, ) *sqsS3EventProcessor { if metrics == nil { @@ -143,18 +140,32 @@ func newSQSS3EventProcessor( metrics = newInputMetrics("", nil, 0) } return &sqsS3EventProcessor{ - s3ObjectHandler: s3, + s3HandlerFactory: s3, sqsVisibilityTimeout: sqsVisibilityTimeout, maxReceiveCount: maxReceiveCount, sqs: sqs, - pipeline: pipeline, log: log, metrics: metrics, script: script, } } -func (p *sqsS3EventProcessor) ProcessSQS(ctx context.Context, msg *types.Message) error { +type sqsProcessingResult struct { + processor *sqsS3EventProcessor + msg *types.Message + receiveCount int // How many times this SQS object has been read + eventCount int // How many events were generated from this SQS object + keepaliveCancel context.CancelFunc + processingErr error + + // Finalizer callbacks for the returned S3 events, invoked via + // finalizeS3Objects after all events are acknowledged. 
+ finalizers []finalizerFunc +} + +type finalizerFunc func() error + +func (p *sqsS3EventProcessor) ProcessSQS(ctx context.Context, msg *types.Message, eventCallback func(beat.Event)) sqsProcessingResult { log := p.log.With( "message_id", *msg.MessageId, "message_receipt_time", time.Now().UTC()) @@ -165,7 +176,10 @@ func (p *sqsS3EventProcessor) ProcessSQS(ctx context.Context, msg *types.Message // Start SQS keepalive worker. var keepaliveWg sync.WaitGroup keepaliveWg.Add(1) - go p.keepalive(keepaliveCtx, log, &keepaliveWg, msg) + go func() { + defer keepaliveWg.Done() + p.keepalive(keepaliveCtx, log, msg) + }() receiveCount := getSQSReceiveCount(msg.Attributes) if receiveCount == 1 { @@ -179,45 +193,69 @@ func (p *sqsS3EventProcessor) ProcessSQS(ctx context.Context, msg *types.Message } } - handles, processingErr := p.processS3Events(ctx, log, *msg.Body) + eventCount := 0 + finalizers, processingErr := p.processS3Events(ctx, log, *msg.Body, func(e beat.Event) { + eventCount++ + eventCallback(e) + }) + + return sqsProcessingResult{ + msg: msg, + processor: p, + receiveCount: receiveCount, + eventCount: eventCount, + keepaliveCancel: keepaliveCancel, + processingErr: processingErr, + finalizers: finalizers, + } +} + +// Call Done to indicate that all events from this SQS message have been +// acknowledged and it is safe to stop the keepalive routine and +// delete / finalize the message. +func (r sqsProcessingResult) Done() { + p := r.processor + processingErr := r.processingErr // Stop keepalive routine before changing visibility. - keepaliveCancel() - keepaliveWg.Wait() + r.keepaliveCancel() // No error. Delete SQS. 
if processingErr == nil { - if msgDelErr := p.sqs.DeleteMessage(context.Background(), msg); msgDelErr != nil { - return fmt.Errorf("failed deleting message from SQS queue (it may be reprocessed): %w", msgDelErr) + if msgDelErr := p.sqs.DeleteMessage(context.Background(), r.msg); msgDelErr != nil { + p.log.Errorf("failed deleting message from SQS queue (it may be reprocessed): %v", msgDelErr.Error()) + return + } + if p.metrics != nil { + // This nil check always passes in production, but it's nice when unit + // tests don't have to initialize irrelevant fields + p.metrics.sqsMessagesDeletedTotal.Inc() } - p.metrics.sqsMessagesDeletedTotal.Inc() // SQS message finished and deleted, finalize s3 objects - if finalizeErr := p.finalizeS3Objects(handles); finalizeErr != nil { - return fmt.Errorf("failed finalizing message from SQS queue (manual cleanup is required): %w", finalizeErr) + if finalizeErr := r.finalizeS3Objects(); finalizeErr != nil { + p.log.Errorf("failed finalizing message from SQS queue (manual cleanup is required): %v", finalizeErr.Error()) } - return nil + return } - if p.maxReceiveCount > 0 && !errors.Is(processingErr, &nonRetryableError{}) { + if p.maxReceiveCount > 0 && r.receiveCount >= p.maxReceiveCount { // Prevent poison pill messages from consuming all workers. Check how // many times this message has been received before making a disposition. - if receiveCount >= p.maxReceiveCount { - processingErr = nonRetryableErrorWrap(fmt.Errorf( - "sqs ApproximateReceiveCount <%v> exceeds threshold %v: %w", - receiveCount, p.maxReceiveCount, processingErr)) - } + processingErr = nonRetryableErrorWrap(fmt.Errorf( + "sqs ApproximateReceiveCount <%v> exceeds threshold %v: %w", + r.receiveCount, p.maxReceiveCount, processingErr)) } // An error that reprocessing cannot correct. Delete SQS. 
if errors.Is(processingErr, &nonRetryableError{}) { - if msgDelErr := p.sqs.DeleteMessage(context.Background(), msg); msgDelErr != nil { - return multierr.Combine( - fmt.Errorf("failed processing SQS message (attempted to delete message): %w", processingErr), - fmt.Errorf("failed deleting message from SQS queue (it may be reprocessed): %w", msgDelErr), - ) + if msgDelErr := p.sqs.DeleteMessage(context.Background(), r.msg); msgDelErr != nil { + p.log.Errorf("failed processing SQS message (attempted to delete message): %v", processingErr.Error()) + p.log.Errorf("failed deleting message from SQS queue (it may be reprocessed): %v", msgDelErr.Error()) + return } p.metrics.sqsMessagesDeletedTotal.Inc() - return fmt.Errorf("failed processing SQS message (message was deleted): %w", processingErr) + p.log.Errorf("failed processing SQS message (message was deleted): %w", processingErr) + return } // An error that may be resolved by letting the visibility timeout @@ -225,12 +263,10 @@ func (p *sqsS3EventProcessor) ProcessSQS(ctx context.Context, msg *types.Message // queue is enabled then the message will eventually placed on the DLQ // after maximum receives is reached. 
p.metrics.sqsMessagesReturnedTotal.Inc() - return fmt.Errorf("failed processing SQS message (it will return to queue after visibility timeout): %w", processingErr) + p.log.Errorf("failed processing SQS message (it will return to queue after visibility timeout): %w", processingErr) } -func (p *sqsS3EventProcessor) keepalive(ctx context.Context, log *logp.Logger, wg *sync.WaitGroup, msg *types.Message) { - defer wg.Done() - +func (p *sqsS3EventProcessor) keepalive(ctx context.Context, log *logp.Logger, msg *types.Message) { t := time.NewTicker(p.sqsVisibilityTimeout / 2) defer t.Stop() @@ -355,7 +391,12 @@ func (*sqsS3EventProcessor) isObjectCreatedEvents(event s3EventV2) bool { return event.EventSource == "aws:s3" && strings.HasPrefix(event.EventName, "ObjectCreated:") } -func (p *sqsS3EventProcessor) processS3Events(ctx context.Context, log *logp.Logger, body string) ([]s3ObjectHandler, error) { +func (p *sqsS3EventProcessor) processS3Events( + ctx context.Context, + log *logp.Logger, + body string, + eventCallback func(beat.Event), +) ([]finalizerFunc, error) { s3Events, err := p.getS3Notifications(body) if err != nil { if errors.Is(err, context.Canceled) { @@ -371,57 +412,36 @@ func (p *sqsS3EventProcessor) processS3Events(ctx context.Context, log *logp.Log return nil, nil } - // Create a pipeline client scoped to this goroutine. - client, err := p.pipeline.ConnectWith(beat.ClientConfig{ - EventListener: awscommon.NewEventACKHandler(), - Processing: beat.ProcessingConfig{ - // This input only produces events with basic types so normalization - // is not required. - EventNormalization: boolPtr(false), - }, - }) - if err != nil { - return nil, err - } - defer client.Close() - - // Wait for all events to be ACKed before proceeding. 
- acker := awscommon.NewEventACKTracker(ctx) - defer acker.Wait() - var errs []error - var handles []s3ObjectHandler + var finalizers []finalizerFunc for i, event := range s3Events { - s3Processor := p.s3ObjectHandler.Create(ctx, log, client, acker, event) + s3Processor := p.s3HandlerFactory.Create(ctx, event) if s3Processor == nil { + // A nil result generally means that this object key doesn't match the + // user-configured filters. continue } // Process S3 object (download, parse, create events). - if err := s3Processor.ProcessS3Object(); err != nil { + if err := s3Processor.ProcessS3Object(log, eventCallback); err != nil { errs = append(errs, fmt.Errorf( "failed processing S3 event for object key %q in bucket %q (object record %d of %d in SQS notification): %w", event.S3.Object.Key, event.S3.Bucket.Name, i+1, len(s3Events), err)) } else { - handles = append(handles, s3Processor) + finalizers = append(finalizers, s3Processor.FinalizeS3Object) } } - // Make sure all s3 events were processed successfully - if len(handles) == len(s3Events) { - return handles, multierr.Combine(errs...) - } - - return nil, multierr.Combine(errs...) + return finalizers, multierr.Combine(errs...) } -func (p *sqsS3EventProcessor) finalizeS3Objects(handles []s3ObjectHandler) error { +func (r sqsProcessingResult) finalizeS3Objects() error { var errs []error - for i, handle := range handles { - if err := handle.FinalizeS3Object(); err != nil { + for i, finalize := range r.finalizers { + if err := finalize(); err != nil { errs = append(errs, fmt.Errorf( "failed finalizing S3 event (object record %d of %d in SQS notification): %w", - i+1, len(handles), err)) + i+1, len(r.finalizers), err)) } } return multierr.Combine(errs...) 
diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event_test.go b/x-pack/filebeat/input/awss3/sqs_s3_event_test.go index 92401fe45eee..c7962bb2f0f3 100644 --- a/x-pack/filebeat/input/awss3/sqs_s3_event_test.go +++ b/x-pack/filebeat/input/awss3/sqs_s3_event_test.go @@ -8,7 +8,6 @@ import ( "context" "errors" "fmt" - "sync" "testing" "time" @@ -22,7 +21,6 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" - awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/go-concert/timed" ) @@ -41,18 +39,16 @@ func TestSQSS3EventProcessor(t *testing.T) { defer ctrl.Finish() mockAPI := NewMockSQSAPI(ctrl) mockS3HandlerFactory := NewMockS3ObjectHandlerFactory(ctrl) - mockClient := NewMockBeatClient(ctrl) - mockBeatPipeline := NewMockBeatPipeline(ctrl) gomock.InOrder( - mockBeatPipeline.EXPECT().ConnectWith(gomock.Any()).Return(mockClient, nil), - mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), - mockClient.EXPECT().Close(), + mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil), mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&msg)).Return(nil), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) - require.NoError(t, p.ProcessSQS(ctx, &msg)) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockS3HandlerFactory) + result := p.ProcessSQS(ctx, &msg, func(_ beat.Event) {}) + require.NoError(t, result.processingErr) + result.Done() }) t.Run("invalid SQS JSON body does not retry", func(t *testing.T) { @@ -63,7 +59,6 @@ func TestSQSS3EventProcessor(t *testing.T) { defer ctrl.Finish() mockAPI := NewMockSQSAPI(ctrl) mockS3HandlerFactory := NewMockS3ObjectHandlerFactory(ctrl) - mockBeatPipeline := NewMockBeatPipeline(ctrl) invalidBodyMsg, err := 
newSQSMessage(newS3Event("log.json")) require.NoError(t, err) @@ -72,14 +67,13 @@ func TestSQSS3EventProcessor(t *testing.T) { body = body[10:] invalidBodyMsg.Body = &body - gomock.InOrder( - mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&invalidBodyMsg)).Return(nil), - ) + mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&invalidBodyMsg)).Return(nil) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) - err = p.ProcessSQS(ctx, &invalidBodyMsg) - require.Error(t, err) - t.Log(err) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockS3HandlerFactory) + result := p.ProcessSQS(ctx, &invalidBodyMsg, func(_ beat.Event) {}) + require.Error(t, result.processingErr) + t.Log(result.processingErr) + result.Done() }) t.Run("zero S3 events in body", func(t *testing.T) { @@ -90,17 +84,16 @@ func TestSQSS3EventProcessor(t *testing.T) { defer ctrl.Finish() mockAPI := NewMockSQSAPI(ctrl) mockS3HandlerFactory := NewMockS3ObjectHandlerFactory(ctrl) - mockBeatPipeline := NewMockBeatPipeline(ctrl) emptyRecordsMsg, err := newSQSMessage([]s3EventV2{}...) 
require.NoError(t, err) - gomock.InOrder( - mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&emptyRecordsMsg)).Return(nil), - ) + mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&emptyRecordsMsg)).Return(nil) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) - require.NoError(t, p.ProcessSQS(ctx, &emptyRecordsMsg)) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockS3HandlerFactory) + result := p.ProcessSQS(ctx, &emptyRecordsMsg, func(_ beat.Event) {}) + require.NoError(t, result.processingErr) + result.Done() }) t.Run("visibility is extended after half expires", func(t *testing.T) { @@ -114,25 +107,23 @@ func TestSQSS3EventProcessor(t *testing.T) { mockAPI := NewMockSQSAPI(ctrl) mockS3HandlerFactory := NewMockS3ObjectHandlerFactory(ctrl) mockS3Handler := NewMockS3ObjectHandler(ctrl) - mockClient := NewMockBeatClient(ctrl) - mockBeatPipeline := NewMockBeatPipeline(ctrl) mockAPI.EXPECT().ChangeMessageVisibility(gomock.Any(), gomock.Eq(&msg), gomock.Eq(visibilityTimeout)).AnyTimes().Return(nil) gomock.InOrder( - mockBeatPipeline.EXPECT().ConnectWith(gomock.Any()).Return(mockClient, nil), - mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, _ *logp.Logger, _ beat.Client, _ *awscommon.EventACKTracker, _ s3EventV2) { + mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any()). 
+ Do(func(ctx context.Context, _ s3EventV2) { require.NoError(t, timed.Wait(ctx, 5*visibilityTimeout)) }).Return(mockS3Handler), - mockS3Handler.EXPECT().ProcessS3Object().Return(nil), - mockClient.EXPECT().Close(), + mockS3Handler.EXPECT().ProcessS3Object(gomock.Any(), gomock.Any()).Return(nil), mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&msg)).Return(nil), mockS3Handler.EXPECT().FinalizeS3Object().Return(nil), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockBeatPipeline, mockS3HandlerFactory) - require.NoError(t, p.ProcessSQS(ctx, &msg)) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockS3HandlerFactory) + result := p.ProcessSQS(ctx, &msg, func(_ beat.Event) {}) + require.NoError(t, result.processingErr) + result.Done() }) t.Run("message returns to queue on error", func(t *testing.T) { @@ -144,20 +135,17 @@ func TestSQSS3EventProcessor(t *testing.T) { mockAPI := NewMockSQSAPI(ctrl) mockS3HandlerFactory := NewMockS3ObjectHandlerFactory(ctrl) mockS3Handler := NewMockS3ObjectHandler(ctrl) - mockClient := NewMockBeatClient(ctrl) - mockBeatPipeline := NewMockBeatPipeline(ctrl) gomock.InOrder( - mockBeatPipeline.EXPECT().ConnectWith(gomock.Any()).Return(mockClient, nil), - mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(mockS3Handler), - mockS3Handler.EXPECT().ProcessS3Object().Return(errors.New("fake connectivity problem")), - mockClient.EXPECT().Close(), + mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any()).Return(mockS3Handler), + mockS3Handler.EXPECT().ProcessS3Object(gomock.Any(), gomock.Any()).Return(errors.New("fake connectivity problem")), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) - err := p.ProcessSQS(ctx, &msg) - t.Log(err) - require.Error(t, err) + p := 
newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockS3HandlerFactory) + result := p.ProcessSQS(ctx, &msg, func(_ beat.Event) {}) + t.Log(result.processingErr) + require.Error(t, result.processingErr) + result.Done() }) t.Run("message is deleted after multiple receives", func(t *testing.T) { @@ -169,8 +157,6 @@ func TestSQSS3EventProcessor(t *testing.T) { mockAPI := NewMockSQSAPI(ctrl) mockS3HandlerFactory := NewMockS3ObjectHandlerFactory(ctrl) mockS3Handler := NewMockS3ObjectHandler(ctrl) - mockClient := NewMockBeatClient(ctrl) - mockBeatPipeline := NewMockBeatPipeline(ctrl) msg := msg msg.Attributes = map[string]string{ @@ -178,17 +164,16 @@ func TestSQSS3EventProcessor(t *testing.T) { } gomock.InOrder( - mockBeatPipeline.EXPECT().ConnectWith(gomock.Any()).Return(mockClient, nil), - mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(mockS3Handler), - mockS3Handler.EXPECT().ProcessS3Object().Return(errors.New("fake connectivity problem")), - mockClient.EXPECT().Close(), + mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any()).Return(mockS3Handler), + mockS3Handler.EXPECT().ProcessS3Object(gomock.Any(), gomock.Any()).Return(errors.New("fake connectivity problem")), mockAPI.EXPECT().DeleteMessage(gomock.Any(), gomock.Eq(&msg)).Return(nil), ) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockBeatPipeline, mockS3HandlerFactory) - err := p.ProcessSQS(ctx, &msg) - t.Log(err) - require.Error(t, err) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, time.Minute, 5, mockS3HandlerFactory) + result := p.ProcessSQS(ctx, &msg, func(_ beat.Event) {}) + t.Log(result.eventCount) + require.Error(t, result.processingErr) + result.Done() }) } @@ -227,16 +212,12 @@ func TestSqsProcessor_keepalive(t *testing.T) { defer ctrl.Finish() mockAPI := NewMockSQSAPI(ctrl) mockS3HandlerFactory := 
NewMockS3ObjectHandlerFactory(ctrl) - mockBeatPipeline := NewMockBeatPipeline(ctrl) mockAPI.EXPECT().ChangeMessageVisibility(gomock.Any(), gomock.Eq(&msg), gomock.Eq(visibilityTimeout)). Times(1).Return(tc.Err) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockBeatPipeline, mockS3HandlerFactory) - var wg sync.WaitGroup - wg.Add(1) - p.keepalive(ctx, p.log, &wg, &msg) - wg.Wait() + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockS3HandlerFactory) + p.keepalive(ctx, p.log, &msg) }) } } @@ -245,7 +226,7 @@ func TestSqsProcessor_getS3Notifications(t *testing.T) { err := logp.TestingSetup() require.NoError(t, err) - p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, nil, nil, time.Minute, 5, nil, nil) + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, nil, nil, time.Minute, 5, nil) t.Run("s3 key is url unescaped", func(t *testing.T) { msg, err := newSQSMessage(newS3Event("Happy+Face.jpg")) diff --git a/x-pack/filebeat/input/awss3/sqs_test.go b/x-pack/filebeat/input/awss3/sqs_test.go index fff17ebc1a6d..8bc25397eaeb 100644 --- a/x-pack/filebeat/input/awss3/sqs_test.go +++ b/x-pack/filebeat/input/awss3/sqs_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/elastic-agent-libs/logp" ) @@ -33,7 +34,7 @@ func TestSQSReceiver(t *testing.T) { err := logp.TestingSetup() require.NoError(t, err) - const maxMessages = 5 + const workerCount = 5 t.Run("ReceiveMessage success", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) @@ -61,8 +62,6 @@ func TestSQSReceiver(t *testing.T) { ReceiveMessage(gomock.Any(), gomock.Any()). Times(1). DoAndReturn(func(_ context.Context, _ int) ([]types.Message, error) { - // Stop the test. 
- cancel() return nil, nil }) @@ -72,19 +71,43 @@ func TestSQSReceiver(t *testing.T) { return map[string]string{sqsApproximateNumberOfMessages: "10000"}, nil }).AnyTimes() + mockSQS.EXPECT(). + DeleteMessage(gomock.Any(), gomock.Any()).Times(1).Do( + func(_ context.Context, _ *types.Message) { + cancel() + }) + + logger := logp.NewLogger(inputName) + // Expect the one message returned to have been processed. mockMsgHandler.EXPECT(). - ProcessSQS(gomock.Any(), gomock.Eq(&msg)). + ProcessSQS(gomock.Any(), gomock.Eq(&msg), gomock.Any()). Times(1). - Return(nil) + DoAndReturn( + func(_ context.Context, _ *types.Message, _ func(e beat.Event)) sqsProcessingResult { + return sqsProcessingResult{ + keepaliveCancel: func() {}, + processor: &sqsS3EventProcessor{ + log: logger, + sqs: mockSQS, + }, + } + }) // Execute sqsReader and verify calls/state. - sqsReader := newSQSReaderInput(config{MaxNumberOfMessages: maxMessages}, aws.Config{}) - sqsReader.log = logp.NewLogger(inputName) + sqsReader := newSQSReaderInput(config{NumberOfWorkers: workerCount}, aws.Config{}) + sqsReader.log = logger sqsReader.sqs = mockSQS - sqsReader.msgHandler = mockMsgHandler sqsReader.metrics = newInputMetrics("", nil, 0) + sqsReader.pipeline = &fakePipeline{} + sqsReader.msgHandler = mockMsgHandler sqsReader.run(ctx) + + select { + case <-ctx.Done(): + case <-time.After(time.Second): + require.Fail(t, "Never observed SQS DeleteMessage call") + } }) t.Run("retry after ReceiveMessage error", func(t *testing.T) { @@ -120,11 +143,12 @@ func TestSQSReceiver(t *testing.T) { }).AnyTimes() // Execute SQSReader and verify calls/state. 
- sqsReader := newSQSReaderInput(config{MaxNumberOfMessages: maxMessages}, aws.Config{}) + sqsReader := newSQSReaderInput(config{NumberOfWorkers: workerCount}, aws.Config{}) sqsReader.log = logp.NewLogger(inputName) sqsReader.sqs = mockSQS sqsReader.msgHandler = mockMsgHandler sqsReader.metrics = newInputMetrics("", nil, 0) + sqsReader.pipeline = &fakePipeline{} sqsReader.run(ctx) }) } diff --git a/x-pack/filebeat/module/aws/_meta/config.yml b/x-pack/filebeat/module/aws/_meta/config.yml index e92cb36e7b53..da0377b6e462 100644 --- a/x-pack/filebeat/module/aws/_meta/config.yml +++ b/x-pack/filebeat/module/aws/_meta/config.yml @@ -14,7 +14,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Process CloudTrail logs @@ -63,9 +63,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -87,7 +84,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -124,9 +121,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
- #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -148,7 +142,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -185,9 +179,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -209,7 +200,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -246,9 +237,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -270,7 +258,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -307,9 +295,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
- #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -331,7 +316,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -368,9 +353,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 diff --git a/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml b/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml index ada3a502fc20..0f395737a052 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml @@ -77,10 +77,6 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} -{{ if .max_number_of_messages }} -max_number_of_messages: {{ .max_number_of_messages }} -{{ end }} - {{ if .proxy_url }} proxy_url: {{ .proxy_url }} {{ end }} diff --git a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml index f19760eb6372..84e6d9060376 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml @@ -28,7 +28,6 @@ var: default: true - name: fips_enabled - name: proxy_url - - name: max_number_of_messages - name: ssl ingest_pipeline: ingest/pipeline.yml diff --git a/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml b/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml index 8ce1970290d2..4c0260809259 100644 --- a/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml @@ -62,10 +62,6 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} 
{{ end }} -{{ if .max_number_of_messages }} -max_number_of_messages: {{ .max_number_of_messages }} -{{ end }} - {{ if .proxy_url }} proxy_url: {{ .proxy_url }} {{ end }} diff --git a/x-pack/filebeat/module/aws/s3access/manifest.yml b/x-pack/filebeat/module/aws/s3access/manifest.yml index e52ba6737579..dc17d1169282 100644 --- a/x-pack/filebeat/module/aws/s3access/manifest.yml +++ b/x-pack/filebeat/module/aws/s3access/manifest.yml @@ -22,7 +22,6 @@ var: default: [forwarded] - name: fips_enabled - name: proxy_url - - name: max_number_of_messages - name: ssl ingest_pipeline: ingest/pipeline.yml diff --git a/x-pack/filebeat/module/aws/vpcflow/config/input.yml b/x-pack/filebeat/module/aws/vpcflow/config/input.yml index ecb1842be7a8..34feb9880b64 100644 --- a/x-pack/filebeat/module/aws/vpcflow/config/input.yml +++ b/x-pack/filebeat/module/aws/vpcflow/config/input.yml @@ -64,10 +64,6 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} -{{ if .max_number_of_messages }} -max_number_of_messages: {{ .max_number_of_messages }} -{{ end }} - {{ if .proxy_url }} proxy_url: {{ .proxy_url }} {{ end }} diff --git a/x-pack/filebeat/module/aws/vpcflow/manifest.yml b/x-pack/filebeat/module/aws/vpcflow/manifest.yml index de772408a868..0787eb019b71 100644 --- a/x-pack/filebeat/module/aws/vpcflow/manifest.yml +++ b/x-pack/filebeat/module/aws/vpcflow/manifest.yml @@ -22,7 +22,6 @@ var: default: [forwarded, preserve_original_event] - name: fips_enabled - name: proxy_url - - name: max_number_of_messages - name: ssl - name: format default: diff --git a/x-pack/filebeat/modules.d/aws.yml.disabled b/x-pack/filebeat/modules.d/aws.yml.disabled index c730b8aea074..44d5e768ddc9 100644 --- a/x-pack/filebeat/modules.d/aws.yml.disabled +++ b/x-pack/filebeat/modules.d/aws.yml.disabled @@ -17,7 +17,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # 
Process CloudTrail logs @@ -66,9 +66,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -90,7 +87,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -127,9 +124,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -151,7 +145,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -188,9 +182,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -212,7 +203,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -249,9 +240,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
- #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -273,7 +261,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -310,9 +298,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 @@ -334,7 +319,7 @@ # Bucket list interval on S3 bucket #var.bucket_list_interval: 300s - # Number of workers on S3 bucket + # Number of workers on S3 bucket or SQS queue #var.number_of_workers: 5 # Filename of AWS credential file @@ -371,9 +356,6 @@ # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. #var.fips_enabled: false - # The maximum number of messages to return from SQS. Valid values: 1 to 10. - #var.max_number_of_messages: 5 - # URL to proxy AWS API calls #var.proxy_url: http://proxy:3128 From 9992eb5e81a4df7c7301d20fbd84145219364516 Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Tue, 15 Oct 2024 21:43:12 -0700 Subject: [PATCH 40/90] [auditbeat] Use shared process cache in `add_session_metadata` processor (#40934) This changes to use a shared process cache in the add_session_metadata processor. This cache is provided by quark and go-quark. There are currently several process caches in auditbeat. The long term intention is to move all process caches to the shared cache provided by quark. This will reduce resource usage, and improve maintainability by not having multiple implementations of a process cache within Auditbeat.
With this change, the process cache that was previously being used by the ebpf backend is no longer used, and quark will provide process data that's required for enrichment. Rather than needing to track processes from within this processor, quark handles everything, so the processor will now only need to request process data from quark when enrichment happens. The add_session_metadata process DB code isn't removed, since it's still used by the procfs backend. That backend is intended to be used on systems that aren't supported by the modern backend. Still, quark also supports as far back as CentOS 7, so there will be few systems that will actually use the procfs backend now. The procfs backend could potentially be removed entirely, along with the process DB cache code in the processor, in the future. --- NOTICE.txt | 113 +--- dev-tools/notice/overrides.json | 1 + go.mod | 6 +- go.sum | 24 +- .../sessionmd/add_session_metadata.go | 70 ++- .../sessionmd/add_session_metadata_test.go | 2 +- .../docs/add_session_metadata.asciidoc | 35 +- .../processors/sessionmd/processdb/db.go | 13 +- .../sessionmd/processdb/entry_leader_test.go | 2 +- .../processors/sessionmd/procfs/procfs.go | 8 +- .../provider/ebpf_provider/ebpf_provider.go | 231 -------- .../kerneltracingprovider_linux.go | 528 ++++++++++++++++++ .../kerneltracingprovider_other.go | 31 + .../procfsprovider.go} | 40 +- .../procfsprovider_test.go} | 2 +- .../processors/sessionmd/provider/provider.go | 2 + .../processors/sessionmd/types/events.go | 4 +- .../processors/sessionmd/types/process.go | 3 + 18 files changed, 695 insertions(+), 420 deletions(-) delete mode 100644 x-pack/auditbeat/processors/sessionmd/provider/ebpf_provider/ebpf_provider.go create mode 100644 x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go create mode 100644 x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_other.go rename 
x-pack/auditbeat/processors/sessionmd/provider/{procfs_provider/procfs_provider.go => procfsprovider/procfsprovider.go} (78%) rename x-pack/auditbeat/processors/sessionmd/provider/{procfs_provider/procfs_provider_test.go => procfsprovider/procfsprovider_test.go} (99%) diff --git a/NOTICE.txt b/NOTICE.txt index 4447873499fc..74fdd66fd1f1 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -14745,6 +14745,18 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-quark +Version: v0.1.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-quark@v0.1.2/LICENSE.txt: + +Source code in this repository is licensed under the Apache License Version 2.0, +an Apache compatible license. + + -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-seccomp-bpf Version: v1.4.0 @@ -23990,11 +24002,11 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.2/LICENS -------------------------------------------------------------------------------- Dependency : go.elastic.co/go-licence-detector -Version: v0.6.1 +Version: v0.7.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.elastic.co/go-licence-detector@v0.6.1/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/go-licence-detector@v0.7.0/LICENSE: Apache License @@ -43095,37 +43107,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------------------------------- -Dependency : github.com/gobuffalo/here -Version: v0.6.7 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gobuffalo/here@v0.6.7/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2019 Mark Bates - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/goccy/go-json Version: v0.10.2 @@ -49853,41 +49834,6 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------------------------------- -Dependency : github.com/karrick/godirwalk -Version: v1.17.0 -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/karrick/godirwalk@v1.17.0/LICENSE: - -BSD 2-Clause License - -Copyright (c) 2017, Karrick McDermott -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -------------------------------------------------------------------------------- Dependency : github.com/kballard/go-shellquote Version: v0.0.0-20180428030007-95032a82bc51 @@ -50645,37 +50591,6 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/markbates/pkger -Version: v0.17.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/markbates/pkger@v0.17.1/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2019 Mark Bates - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/martini-contrib/render Version: v0.0.0-20150707142108-ec18f8345a11 diff --git a/dev-tools/notice/overrides.json b/dev-tools/notice/overrides.json index bb82c97ebe40..a50cac02e0fb 100644 --- a/dev-tools/notice/overrides.json +++ b/dev-tools/notice/overrides.json @@ -19,3 +19,4 @@ {"name": "github.com/JohnCGriffin/overflow", "licenceType": "MIT"} {"name": "github.com/elastic/ebpfevents", "licenceType": "Apache-2.0"} {"name": "go.opentelemetry.io/collector/config/configopaque", "licenceType": "Apache-2.0"} +{"name": "github.com/elastic/go-quark", "licenceType": "Apache-2.0"} diff --git a/go.mod b/go.mod index 9459bb0f13e4..0f3c26503ca9 100644 --- a/go.mod +++ b/go.mod @@ -130,7 +130,7 @@ require ( github.com/ugorji/go/codec v1.1.8 github.com/vmware/govmomi v0.39.0 go.elastic.co/ecszap v1.0.2 - go.elastic.co/go-licence-detector v0.6.1 + go.elastic.co/go-licence-detector v0.7.0 go.etcd.io/bbolt v1.3.10 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 @@ -192,6 +192,7 @@ require ( github.com/elastic/elastic-agent-libs v0.12.1 github.com/elastic/elastic-agent-system-metrics v0.11.1 github.com/elastic/go-elasticsearch/v8 v8.14.0 + github.com/elastic/go-quark v0.1.2 github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 github.com/elastic/mito v1.15.0 github.com/elastic/tk-btf v0.1.0 @@ -304,7 +305,6 @@ require ( github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.9 // indirect github.com/go-resty/resty/v2 v2.13.1 // indirect - github.com/gobuffalo/here v0.6.7 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/godror/knownpb v0.1.0 // indirect 
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect @@ -336,7 +336,6 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/karrick/godirwalk v1.17.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/asmfmt v1.3.2 // indirect github.com/klauspost/compress v1.17.9 // indirect @@ -345,7 +344,6 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/markbates/pkger v0.17.1 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect diff --git a/go.sum b/go.sum index 1a57c8132766..0362a16115fe 100644 --- a/go.sum +++ b/go.sum @@ -381,6 +381,8 @@ github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f h1:TsPpU5EAwlt github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f/go.mod h1:HHaWnZamYKWsR9/eZNHqRHob8iQDKnchHmmskT/SKko= github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 h1:q8n4QjcLa4q39Q3fqHRknTBXBtegjriHFrB42YKgXGI= github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595/go.mod h1:s09U1b4P1ZxnKx2OsqY7KlHdCesqZWIhyq0Gs/QC/Us= +github.com/elastic/go-quark v0.1.2 h1:Hnov9q8D9ofS976SODWWYAZ23IpgPILxTUCiccmhw0c= +github.com/elastic/go-quark v0.1.2/go.mod h1:/ngqgumD/Z5vnFZ4XPN2kCbxnEfG5/Uc+bRvOBabVVA= github.com/elastic/go-seccomp-bpf v1.4.0 h1:6y3lYrEHrLH9QzUgOiK8WDqmPaMnnB785WxibCNIOH4= github.com/elastic/go-seccomp-bpf v1.4.0/go.mod h1:wIMxjTbKpWGQk4CV9WltlG6haB4brjSH/dvAohBPM1I= github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 h1:yuiN60oaQUz2PtNpNhDI2H6zrCdfiiptmNdwV5WUaKA= @@ -483,9 +485,6 @@ github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= -github.com/gobuffalo/here v0.6.7 h1:hpfhh+kt2y9JLDfhYUxxCRxQol540jsVfKUZzjlbp8o= -github.com/gobuffalo/here v0.6.7/go.mod h1:vuCfanjqckTuRlqAitJz6QC4ABNnS27wLb816UhsPcc= github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be h1:zXHeEEJ231bTf/IXqvCfeaqjLpXsq42ybLoT4ROSR6Y= github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be/go.mod h1:/oj50ZdPq/cUjA02lMZhijk5kR31SEydKyqah1OgBuo= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= @@ -564,7 +563,6 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= github.com/google/licenseclassifier v0.0.0-20221004142553-c1ed8fcf4bab h1:okY7fFoWybMbxiHkaqStN4mxSrPfYmTZl5Zh32Z5FjY= github.com/google/licenseclassifier v0.0.0-20221004142553-c1ed8fcf4bab/go.mod h1:jkYIPv59uiw+1MxTWlqQEKebsUDV1DCXQtBBn5lVzf4= github.com/google/licenseclassifier/v2 v2.0.0-alpha.1/go.mod h1:YAgBGGTeNDMU+WfIgaFvjZe4rudym4f6nIn8ZH5X+VM= @@ -677,9 +675,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karrick/godirwalk v1.15.6/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= -github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -713,9 +708,6 @@ github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/pkger v0.17.0/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= -github.com/markbates/pkger v0.17.1 h1:/MKEtWqtc0mZvu9OinB9UzVN9iYCwLWuyUv4Bw+PCno= -github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -855,7 +847,6 @@ github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e h1:hUGyBE/4CXRPTh github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e/go.mod h1:Sb6li54lXV0yYEjI4wX8cucdQ9gqUJV3+Ngg3l9g30I= github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54 h1:jbchLJWyhKcmOjkbC4zDvT/n5EEd7g6hnnF760rEyRA= 
github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54/go.mod h1:Vrkh1pnjV9Bl8c3P9zH0/D4NlOHWP5d4/hF4YTULaec= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= @@ -955,8 +946,8 @@ go.elastic.co/ecszap v1.0.2 h1:iW5OGx8IiokiUzx/shD4AJCPFMC9uUtr7ycaiEIU++I= go.elastic.co/ecszap v1.0.2/go.mod h1:dJkSlK3BTiwG/qXhCwe50Mz/jwu854vSip8sIeQhNZg= go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= -go.elastic.co/go-licence-detector v0.6.1 h1:T2PFHYdow+9mAjj6K5ehn5anTxtsURfom2P4S6PgMzg= -go.elastic.co/go-licence-detector v0.6.1/go.mod h1:qQ1clBRS2f0Ee5ie+y2LLYnyhSNJNm0Ha6d7SoYVtM4= +go.elastic.co/go-licence-detector v0.7.0 h1:qC31sfyfNcNx/zMYcLABU0ac3MbGHZgksCAb5lMDUMg= +go.elastic.co/go-licence-detector v0.7.0/go.mod h1:f5ty8pjynzQD8BcS+s0qtlOGKc35/HKQxCVi8SHhV5k= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= @@ -1054,8 +1045,6 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.21.0 
h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1086,7 +1075,6 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= @@ -1106,8 +1094,6 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1206,7 +1192,6 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1287,7 +1272,6 @@ gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go index 4fa86c25d029..28ef4697b79a 100644 --- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go +++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go @@ -13,12 +13,14 @@ import ( "strconv" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/processdb" "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/procfs" "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider" - "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider/ebpf_provider" - 
"github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider/procfs_provider" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" cfg "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" @@ -35,13 +37,17 @@ func InitializeModule() { } type addSessionMetadata struct { + ctx context.Context + cancel context.CancelFunc config config logger *logp.Logger db *processdb.DB provider provider.Provider + backend string } func New(cfg *cfg.C) (beat.Processor, error) { + cfgwarn.Beta("add_session_metadata processor is a beta feature.") c := defaultConfig() if err := cfg.Unpack(&c); err != nil { return nil, fmt.Errorf("fail to unpack the %v configuration: %w", processorName, err) @@ -49,49 +55,59 @@ func New(cfg *cfg.C) (beat.Processor, error) { logger := logp.NewLogger(logName) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) reader := procfs.NewProcfsReader(*logger) db, err := processdb.NewDB(reader, *logger) if err != nil { + cancel() return nil, fmt.Errorf("failed to create DB: %w", err) } - backfilledPIDs := db.ScrapeProcfs() - logger.Infof("backfilled %d processes", len(backfilledPIDs)) + if c.Backend != "kernel_tracing" { + backfilledPIDs := db.ScrapeProcfs() + logger.Infof("backfilled %d processes", len(backfilledPIDs)) + } var p provider.Provider switch c.Backend { case "auto": - p, err = ebpf_provider.NewProvider(ctx, logger, db) + p, err = kerneltracingprovider.NewProvider(ctx, logger) if err != nil { - // Most likely cause of error is not supporting ebpf on system, try procfs - p, err = procfs_provider.NewProvider(ctx, logger, db, reader, c.PIDField) + // Most likely cause of error is not supporting ebpf or 
kprobes on system, try procfs + p, err = procfsprovider.NewProvider(ctx, logger, db, reader, c.PIDField) if err != nil { + cancel() return nil, fmt.Errorf("failed to create provider: %w", err) } logger.Info("backend=auto using procfs") } else { - logger.Info("backend=auto using ebpf") + logger.Info("backend=auto using kernel_tracing") } - case "ebpf": - p, err = ebpf_provider.NewProvider(ctx, logger, db) + case "procfs": + p, err = procfsprovider.NewProvider(ctx, logger, db, reader, c.PIDField) if err != nil { - return nil, fmt.Errorf("failed to create ebpf provider: %w", err) + cancel() + return nil, fmt.Errorf("failed to create procfs provider: %w", err) } - case "procfs": - p, err = procfs_provider.NewProvider(ctx, logger, db, reader, c.PIDField) + case "kernel_tracing": + p, err = kerneltracingprovider.NewProvider(ctx, logger) if err != nil { - return nil, fmt.Errorf("failed to create ebpf provider: %w", err) + cancel() + return nil, fmt.Errorf("failed to create kernel_tracing provider: %w", err) } default: + cancel() return nil, fmt.Errorf("unknown backend configuration") } return &addSessionMetadata{ + ctx: ctx, + cancel: cancel, config: c, logger: logger, db: db, provider: p, + backend: c.Backend, }, nil } @@ -127,6 +143,7 @@ func (p *addSessionMetadata) Run(ev *beat.Event) (*beat.Event, error) { func (p *addSessionMetadata) Close() error { p.db.Close() + p.cancel() return nil } @@ -145,13 +162,24 @@ func (p *addSessionMetadata) enrich(ev *beat.Event) (*beat.Event, error) { return nil, fmt.Errorf("cannot parse pid field '%s': %w", p.config.PIDField, err) } - fullProcess, err := p.db.GetProcess(pid) - if err != nil { - e := fmt.Errorf("pid %v not found in db: %w", pid, err) - p.logger.Errorf("%v", e) - return nil, e + var fullProcess types.Process + if p.backend == "kernel_tracing" { + // kernel_tracing doesn't enrich with the processor DB; process info is taken directly from quark cache + proc, err := p.provider.GetProcess(pid) + if err != nil { + e := 
fmt.Errorf("pid %v not found in db: %w", pid, err) + p.logger.Warnw("PID not found in provider", "pid", pid, "error", err) + return nil, e + } + fullProcess = *proc + } else { + fullProcess, err = p.db.GetProcess(pid) + if err != nil { + e := fmt.Errorf("pid %v not found in db: %w", pid, err) + p.logger.Warnw("PID not found in provider", "pid", pid, "error", err) + return nil, e + } } - processMap := fullProcess.ToMap() if b, err := ev.Fields.HasKey("process"); !b || err != nil { diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go index 95892482f80e..a993737611bd 100644 --- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go +++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go @@ -361,7 +361,7 @@ func TestEnrich(t *testing.T) { require.Nil(t, err, "%s: enrich error: %w", tt.testName, err) require.NotNil(t, actual, "%s: returned nil event", tt.testName) - //Validate output + // Validate output if diff := cmp.Diff(tt.expected.Fields, actual.Fields, ignoreMissingFrom(tt.expected.Fields)); diff != "" { t.Errorf("field mismatch:\n%s", diff) } diff --git a/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc b/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc index d29c5d0ac80b..aaddde322c14 100644 --- a/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc +++ b/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc @@ -8,7 +8,7 @@ beta::[] The `add_session_metadata` processor enriches process events with additional information that users can see using the {security-guide}/session-view.html[Session View] tool in the -{elastic-sec} platform. +{elastic-sec} platform. NOTE: The current release of `add_session_metadata` processor for {auditbeat} is limited to virtual machines (VMs) and bare metal environments. 
@@ -27,9 +27,9 @@ auditbeat.modules: [[add-session-metadata-explained]] ==== How the `add_session_metadata` processor works -Using the available Linux kernel technology, the processor collects comprehensive information on all running system processes, compiling this data into a process database. -When processing an event (such as those generated by the {auditbeat} `auditd` module), the processor queries this database to retrieve information about related processes, including the parent process, session leader, process group leader, and entry leader. -It then enriches the original event with this metadata, providing a more complete picture of process relationships and system activities. +Using the available Linux kernel technology, the processor collects comprehensive information on all running system processes, compiling this data into a process database. +When processing an event (such as those generated by the {auditbeat} `auditd` module), the processor queries this database to retrieve information about related processes, including the parent process, session leader, process group leader, and entry leader. +It then enriches the original event with this metadata, providing a more complete picture of process relationships and system activities. This enhanced data enables the powerful {security-guide}/session-view.html[Session View] tool in the {elastic-sec} platform, offering users deeper insights for analysis and investigation. @@ -40,17 +40,18 @@ This enhanced data enables the powerful {security-guide}/session-view.html[Sessi The `add_session_metadata` processor operates using various backend options. * `auto` is the recommended setting. - It attempts to use `ebpf` first, falling back to `procfs` if necessary, ensuring compatibility even on systems without `ebpf` support. -* `ebpf` collects process information with eBPF. - This backend requires a system with Linux kernel 5.10.16 or above, kernel support for eBPF enabled, and auditbeat running as superuser. 
-`procfs` collects process information with the proc filesystem. - This is compatible with older systems that may not support ebpf. - To gather complete process info, auditbeat requires permissions to read all process data in procfs; for example, run as a superuser or have the `SYS_PTRACE` capability. + It attempts to use `kernel_tracing` first, falling back to `procfs` if necessary, ensuring compatibility even on systems without `kernel_tracing` support. +* `kernel_tracing` collects process information with eBPF or kprobes. + This backend will prefer to use eBPF; if eBPF is not supported, kprobes will be used. eBPF requires a system with Linux kernel 5.10.16 or above, kernel support for eBPF enabled, and auditbeat running as superuser. + Kprobe support requires Linux kernel 3.10.0 or above, and auditbeat running as a superuser. +* `procfs` collects process information with the proc filesystem. + This is compatible with older systems that may not support ebpf. + To gather complete process info, auditbeat requires permissions to read all process data in procfs; for example, run as a superuser or have the `SYS_PTRACE` capability. [[add-session-metadata-containers]] ===== Containers -If you are running {auditbeat} in a container, the container must run in the host's PID namespace. -With the `auto` or `ebpf` backend, these host directories must also be mounted to the same path within the container: `/sys/kernel/debug`, `/sys/fs/bpf`. +If you are running {auditbeat} in a container, the container must run in the host's PID namespace. +With the `auto` or `kernel_tracing` backend, these host directories must also be mounted to the same path within the container: `/sys/kernel/debug`, `/sys/fs/bpf`. 
[[add-session-metadata-enable]] ==== Enable and configure Session View in {auditbeat} @@ -58,10 +59,10 @@ With the `auto` or `ebpf` backend, these host directories must also be mounted t To configure and enable {security-guide}/session-view.html[Session View] functionality, you'll: * Add the `add_sessions_metadata` processor to your `auditbeat.yml` file. -* Configure audit rules in your `auditbeat.yml` file. +* Configure audit rules in your `auditbeat.yml` file. * Restart {auditbeat}. -We'll walk you through these steps in more detail. +We'll walk you through these steps in more detail. . Edit your `auditbeat.yml` file and add this info to the modules configuration section: + @@ -89,11 +90,11 @@ auditbeat.modules: -a always,exit -F arch=b64 -S setsid ------------------------------------- + -. Save your configuration changes. +. Save your configuration changes. + -. Restart {auditbeat}: +. Restart {auditbeat}: + [source,sh] ------------------------------------- sudo systemctl restart auditbeat -------------------------------------- \ No newline at end of file +------------------------------------- diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/db.go b/x-pack/auditbeat/processors/sessionmd/processdb/db.go index 28c848ddfdbc..e18c247a8590 100644 --- a/x-pack/auditbeat/processors/sessionmd/processdb/db.go +++ b/x-pack/auditbeat/processors/sessionmd/processdb/db.go @@ -254,6 +254,13 @@ func (db *DB) InsertFork(fork types.ProcessForkEvent) { } } +func (db *DB) InsertProcess(process Process) { + db.mutex.Lock() + defer db.mutex.Unlock() + + db.insertProcess(process) +} + func (db *DB) insertProcess(process Process) { pid := process.PIDs.Tgid db.processes[pid] = process @@ -458,8 +465,8 @@ func fullProcessFromDBProcess(p Process) types.Process { } ret.Thread.Capabilities.Permitted, _ = capabilities.FromUint64(p.Creds.CapPermitted) ret.Thread.Capabilities.Effective, _ = capabilities.FromUint64(p.Creds.CapEffective) - ret.TTY.CharDevice.Major = p.CTTY.Major - 
ret.TTY.CharDevice.Minor = p.CTTY.Minor + ret.TTY.CharDevice.Major = uint16(p.CTTY.Major) + ret.TTY.CharDevice.Minor = uint16(p.CTTY.Minor) ret.ExitCode = p.ExitCode return ret @@ -736,7 +743,7 @@ func isFilteredExecutable(executable string) bool { return stringStartsWithEntryInList(executable, filteredExecutables[:]) } -func getTTYType(major uint16, minor uint16) TTYType { +func getTTYType(major uint32, minor uint32) TTYType { if major >= ptsMinMajor && major <= ptsMaxMajor { return Pts } diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/entry_leader_test.go b/x-pack/auditbeat/processors/sessionmd/processdb/entry_leader_test.go index 74140f47f6cd..fa0bc6e17993 100644 --- a/x-pack/auditbeat/processors/sessionmd/processdb/entry_leader_test.go +++ b/x-pack/auditbeat/processors/sessionmd/processdb/entry_leader_test.go @@ -1491,7 +1491,7 @@ func TestPIDReuseNewSession(t *testing.T) { ExitCode: 0, }) - //2nd session + // 2nd session x1 := bashPID x2 := sshd0PID sshd0PID = command0PID diff --git a/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go b/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go index fc843373389d..b76dfdfdb485 100644 --- a/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go +++ b/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go @@ -18,12 +18,12 @@ import ( "github.com/elastic/elastic-agent-libs/logp" ) -func MajorTTY(ttyNr uint32) uint16 { - return uint16((ttyNr >> 8) & 0xff) +func MajorTTY(ttyNr uint32) uint32 { + return (ttyNr >> 8) & 0xff } -func MinorTTY(ttyNr uint32) uint16 { - return uint16(((ttyNr & 0xfff00000) >> 20) | (ttyNr & 0xff)) +func MinorTTY(ttyNr uint32) uint32 { + return ((ttyNr >> 12) & 0xfff00) | (ttyNr & 0xff) } // this interface exists so that we can inject a mock procfs reader for deterministic testing diff --git a/x-pack/auditbeat/processors/sessionmd/provider/ebpf_provider/ebpf_provider.go b/x-pack/auditbeat/processors/sessionmd/provider/ebpf_provider/ebpf_provider.go deleted file mode 100644 
index 31220465dfe4..000000000000 --- a/x-pack/auditbeat/processors/sessionmd/provider/ebpf_provider/ebpf_provider.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build linux - -package ebpf_provider - -import ( - "context" - "fmt" - "time" - - "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/ebpf" - "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/processdb" - "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider" - "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" - "github.com/elastic/ebpfevents" - "github.com/elastic/elastic-agent-libs/logp" -) - -const ( - name = "add_session_metadata" - eventMask = ebpf.EventMask(ebpfevents.EventTypeProcessFork | ebpfevents.EventTypeProcessExec | ebpfevents.EventTypeProcessExit) -) - -type prvdr struct { - ctx context.Context - logger *logp.Logger - db *processdb.DB -} - -func NewProvider(ctx context.Context, logger *logp.Logger, db *processdb.DB) (provider.Provider, error) { - p := prvdr{ - ctx: ctx, - logger: logger, - db: db, - } - - w, err := ebpf.GetWatcher() - if err != nil { - return nil, fmt.Errorf("get ebpf watcher: %w", err) - } - - records := w.Subscribe(name, eventMask) - - go func(logger logp.Logger) { - for { - r := <-records - if r.Error != nil { - logger.Warnw("received error from the ebpf subscription", "error", err) - continue - } - if r.Event == nil { - continue - } - ev := r.Event - switch ev.Type { - case ebpfevents.EventTypeProcessFork: - body, ok := ev.Body.(*ebpfevents.ProcessFork) - if !ok { - logger.Errorf("unexpected event body, got %T", ev.Body) - continue - } - pe := types.ProcessForkEvent{ - ParentPIDs: types.PIDInfo{ - Tid: body.ParentPids.Tid, - Tgid: 
body.ParentPids.Tgid, - Ppid: body.ParentPids.Ppid, - Pgid: body.ParentPids.Pgid, - Sid: body.ParentPids.Sid, - StartTimeNS: body.ParentPids.StartTimeNs, - }, - ChildPIDs: types.PIDInfo{ - Tid: body.ChildPids.Tid, - Tgid: body.ChildPids.Tgid, - Ppid: body.ChildPids.Ppid, - Pgid: body.ChildPids.Pgid, - Sid: body.ChildPids.Sid, - StartTimeNS: body.ChildPids.StartTimeNs, - }, - Creds: types.CredInfo{ - Ruid: body.Creds.Ruid, - Rgid: body.Creds.Rgid, - Euid: body.Creds.Euid, - Egid: body.Creds.Egid, - Suid: body.Creds.Suid, - Sgid: body.Creds.Sgid, - CapPermitted: body.Creds.CapPermitted, - CapEffective: body.Creds.CapEffective, - }, - } - p.db.InsertFork(pe) - case ebpfevents.EventTypeProcessExec: - body, ok := ev.Body.(*ebpfevents.ProcessExec) - if !ok { - logger.Errorf("unexpected event body") - continue - } - pe := types.ProcessExecEvent{ - PIDs: types.PIDInfo{ - Tid: body.Pids.Tid, - Tgid: body.Pids.Tgid, - Ppid: body.Pids.Ppid, - Pgid: body.Pids.Pgid, - Sid: body.Pids.Sid, - StartTimeNS: body.Pids.StartTimeNs, - }, - Creds: types.CredInfo{ - Ruid: body.Creds.Ruid, - Rgid: body.Creds.Rgid, - Euid: body.Creds.Euid, - Egid: body.Creds.Egid, - Suid: body.Creds.Suid, - Sgid: body.Creds.Sgid, - CapPermitted: body.Creds.CapPermitted, - CapEffective: body.Creds.CapEffective, - }, - CTTY: types.TTYDev{ - Major: body.CTTY.Major, - Minor: body.CTTY.Minor, - }, - CWD: body.Cwd, - Argv: body.Argv, - Env: body.Env, - Filename: body.Filename, - } - p.db.InsertExec(pe) - case ebpfevents.EventTypeProcessExit: - body, ok := ev.Body.(*ebpfevents.ProcessExit) - if !ok { - logger.Errorf("unexpected event body") - continue - } - pe := types.ProcessExitEvent{ - PIDs: types.PIDInfo{ - Tid: body.Pids.Tid, - Tgid: body.Pids.Tgid, - Ppid: body.Pids.Ppid, - Pgid: body.Pids.Pgid, - Sid: body.Pids.Sid, - StartTimeNS: body.Pids.StartTimeNs, - }, - ExitCode: body.ExitCode, - } - p.db.InsertExit(pe) - } - } - }(*p.logger) - - return &p, nil -} - -const ( - maxWaitLimit = 200 * time.Millisecond 
// Maximum time SyncDB will wait for process - combinedWaitLimit = 2 * time.Second // Multiple SyncDB calls will wait up to this amount within resetDuration - backoffDuration = 10 * time.Second // SyncDB will stop waiting for processes for this time - resetDuration = 5 * time.Second // After this amount of times with no backoffs, the combinedWait will be reset -) - -var ( - combinedWait = 0 * time.Millisecond - inBackoff = false - backoffStart = time.Now() - since = time.Now() - backoffSkipped = 0 -) - -// With ebpf, process events are pushed to the DB by the above goroutine, so this doesn't actually update the DB. -// It does to try sync the processor and ebpf events, so that the process is in the process db before continuing. -// -// It's possible that the event to enrich arrives before the process is inserted into the DB. In that case, this -// will block continuing the enrichment until the process is seen (or the timeout is reached). -// -// If for some reason a lot of time has been spent waiting for missing processes, this also has a backoff timer during -// which it will continue without waiting for missing events to arrive, so the processor doesn't become overly backed-up -// waiting for these processes, at the cost of possibly not enriching some processes. 
-func (s prvdr) SyncDB(ev *beat.Event, pid uint32) error { - if s.db.HasProcess(pid) { - return nil - } - - now := time.Now() - if inBackoff { - if now.Sub(backoffStart) > backoffDuration { - s.logger.Warnf("ended backoff, skipped %d processes", backoffSkipped) - inBackoff = false - combinedWait = 0 * time.Millisecond - } else { - backoffSkipped += 1 - return nil - } - } else { - if combinedWait > combinedWaitLimit { - s.logger.Warn("starting backoff") - inBackoff = true - backoffStart = now - backoffSkipped = 0 - return nil - } - // maintain a moving window of time for the delays we track - if now.Sub(since) > resetDuration { - since = now - combinedWait = 0 * time.Millisecond - } - } - - start := now - nextWait := 5 * time.Millisecond - for { - waited := time.Since(start) - if s.db.HasProcess(pid) { - s.logger.Debugf("got process that was missing after %v", waited) - combinedWait = combinedWait + waited - return nil - } - if waited >= maxWaitLimit { - e := fmt.Errorf("process %v was not seen after %v", pid, waited) - s.logger.Warnf("%v", e) - combinedWait = combinedWait + waited - return e - } - time.Sleep(nextWait) - if nextWait*2+waited > maxWaitLimit { - nextWait = maxWaitLimit - waited - } else { - nextWait = nextWait * 2 - } - } -} diff --git a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go new file mode 100644 index 000000000000..966f4b36c30c --- /dev/null +++ b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go @@ -0,0 +1,528 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build linux && (amd64 || arm64) && cgo + +package kerneltracingprovider + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + quark "github.com/elastic/go-quark" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" + "github.com/elastic/elastic-agent-libs/logp" +) + +type prvdr struct { + ctx context.Context + logger *logp.Logger + qq *quark.Queue + qqMtx *sync.Mutex + combinedWait time.Duration + inBackoff bool + backoffStart time.Time + since time.Time + backoffSkipped int +} + +type TTYType int + +const ( + TTYUnknown TTYType = iota + Pts + TTY + TTYConsole +) + +const ( + Init = "init" + Sshd = "sshd" + Ssm = "ssm" + Container = "container" + Terminal = "terminal" + Kthread = "kthread" + EntryConsole = "console" + EntryUnknown = "unknown" +) + +const ( + ptsMinMajor = 136 + ptsMaxMajor = 143 + ttyMajor = 4 + consoleMaxMinor = 63 + ttyMaxMinor = 255 +) + +var ( + bootID string + pidNsInode uint64 +) + +func readBootID() (string, error) { + bootID, err := os.ReadFile("/proc/sys/kernel/random/boot_id") + if err != nil { + return "", fmt.Errorf("could not read /proc/sys/kernel/random/boot_id, process entity IDs will not be correct: %w", err) + } + + return strings.TrimRight(string(bootID), "\n"), nil +} + +func readPIDNsInode() (uint64, error) { + var ret uint64 + + pidNsInodeRaw, err := os.Readlink("/proc/self/ns/pid") + if err != nil { + return 0, fmt.Errorf("could not read /proc/self/ns/pid: %w", err) + } + + if _, err = fmt.Sscanf(pidNsInodeRaw, "pid:[%d]", &ret); err != nil { + return 0, fmt.Errorf("could not parse contents of /proc/self/ns/pid (%q): %w", pidNsInodeRaw, err) + } + + return ret, nil +} + +func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, error) { + attr := quark.DefaultQueueAttr() + 
attr.Flags = quark.QQ_ALL_BACKENDS | quark.QQ_ENTRY_LEADER | quark.QQ_NO_SNAPSHOT + qq, err := quark.OpenQueue(attr, 64) + if err != nil { + return nil, fmt.Errorf("open queue: %w", err) + } + + p := &prvdr{ + ctx: ctx, + logger: logger, + qq: qq, + qqMtx: new(sync.Mutex), + combinedWait: 0 * time.Millisecond, + inBackoff: false, + backoffStart: time.Now(), + since: time.Now(), + backoffSkipped: 0, + } + + go func(ctx context.Context, qq *quark.Queue, logger *logp.Logger, p *prvdr) { + defer qq.Close() + for ctx.Err() == nil { + p.qqMtx.Lock() + events, err := qq.GetEvents() + p.qqMtx.Unlock() + if err != nil { + logger.Errorw("get events from quark, no more process enrichment from this processor will be done", "error", err) + break + } + if len(events) == 0 { + err = qq.Block() + if err != nil { + logger.Errorw("quark block, no more process enrichment from this processor will be done", "error", err) + break + } + } + } + }(ctx, qq, logger, p) + + bootID, err = readBootID() + if err != nil { + p.logger.Errorw("failed to read boot ID, entity ID will not be correct", "error", err) + } + pidNsInode, err = readPIDNsInode() + if err != nil { + p.logger.Errorw("failed to read PID namespace inode, entity ID will not be correct", "error", err) + } + + return p, nil +} + +const ( + maxWaitLimit = 1200 * time.Millisecond // Maximum time SyncDB will wait for process + combinedWaitLimit = 15 * time.Second // Multiple SyncDB calls will wait up to this amount within resetDuration + backoffDuration = 10 * time.Second // SyncDB will stop waiting for processes for this time + resetDuration = 5 * time.Second // After this amount of times with no backoffs, the combinedWait will be reset +) + +func (p *prvdr) SyncDB(_ *beat.Event, pid uint32) error { + p.qqMtx.Lock() + defer p.qqMtx.Unlock() + + // Use qq.Lookup, not lookupLocked, in this function. 
Mutex is locked for entire function + + if _, found := p.qq.Lookup(int(pid)); found { + return nil + } + + now := time.Now() + if p.inBackoff { + if now.Sub(p.backoffStart) > backoffDuration { + p.logger.Warnw("ended backoff, skipped processes", "backoffSkipped", p.backoffSkipped) + p.inBackoff = false + p.combinedWait = 0 * time.Millisecond + } else { + p.backoffSkipped += 1 + return nil + } + } else { + if p.combinedWait > combinedWaitLimit { + p.logger.Warn("starting backoff") + p.inBackoff = true + p.backoffStart = now + p.backoffSkipped = 0 + return nil + } + // maintain a moving window of time for the delays we track + if now.Sub(p.since) > resetDuration { + p.since = now + p.combinedWait = 0 * time.Millisecond + } + } + + start := now + nextWait := 5 * time.Millisecond + for { + waited := time.Since(start) + if _, found := p.qq.Lookup(int(pid)); found { + p.logger.Debugw("got process that was missing ", "waited", waited) + p.combinedWait = p.combinedWait + waited + return nil + } + if waited >= maxWaitLimit { + p.combinedWait = p.combinedWait + waited + return fmt.Errorf("process %v was not seen after %v", pid, waited) + } + time.Sleep(nextWait) + if nextWait*2+waited > maxWaitLimit { + nextWait = maxWaitLimit - waited + } else { + nextWait = nextWait * 2 + } + } +} + +func (p *prvdr) GetProcess(pid uint32) (*types.Process, error) { + proc, found := p.lookupLocked(pid) + if !found { + return nil, fmt.Errorf("PID %d not found in cache", pid) + } + + interactive := interactiveFromTTY(types.TTYDev{ + Major: proc.Proc.TtyMajor, + Minor: proc.Proc.TtyMinor, + }) + + start := time.Unix(0, int64(proc.Proc.TimeBoot)) + + ret := types.Process{ + PID: proc.Pid, + Start: &start, + Name: basename(proc.Filename), + Executable: proc.Filename, + Args: proc.Cmdline, + WorkingDirectory: proc.Cwd, + Interactive: &interactive, + } + + euid := proc.Proc.Euid + egid := proc.Proc.Egid + ret.User.ID = strconv.FormatUint(uint64(euid), 10) + username, ok := getUserName(ret.User.ID) 
+ if ok { + ret.User.Name = username + } + ret.Group.ID = strconv.FormatUint(uint64(egid), 10) + groupname, ok := getGroupName(ret.Group.ID) + if ok { + ret.Group.Name = groupname + } + ret.TTY.CharDevice.Major = uint16(proc.Proc.TtyMajor) + ret.TTY.CharDevice.Minor = uint16(proc.Proc.TtyMinor) + if proc.Exit.Valid { + end := time.Unix(0, int64(proc.Exit.ExitTimeProcess)) + ret.ExitCode = proc.Exit.ExitCode + ret.End = &end + } + ret.EntityID = calculateEntityIDv1(pid, *ret.Start) + + p.fillParent(&ret, proc.Proc.Ppid) + p.fillGroupLeader(&ret, proc.Proc.Pgid) + p.fillSessionLeader(&ret, proc.Proc.Sid) + p.fillEntryLeader(&ret, proc.Proc.EntryLeader) + setEntityID(&ret) + setSameAsProcess(&ret) + return &ret, nil +} + +func (p prvdr) lookupLocked(pid uint32) (quark.Process, bool) { + p.qqMtx.Lock() + defer p.qqMtx.Unlock() + + return p.qq.Lookup(int(pid)) +} + +func (p prvdr) fillParent(process *types.Process, ppid uint32) { + proc, found := p.lookupLocked(ppid) + if !found { + return + } + + start := time.Unix(0, int64(proc.Proc.TimeBoot)) + interactive := interactiveFromTTY(types.TTYDev{ + Major: proc.Proc.TtyMajor, + Minor: proc.Proc.TtyMinor, + }) + euid := proc.Proc.Euid + egid := proc.Proc.Egid + process.Parent.PID = proc.Pid + process.Parent.Start = &start + process.Parent.Name = basename(proc.Filename) + process.Parent.Executable = proc.Filename + process.Parent.Args = proc.Cmdline + process.Parent.WorkingDirectory = proc.Cwd + process.Parent.Interactive = &interactive + process.Parent.User.ID = strconv.FormatUint(uint64(euid), 10) + username, ok := getUserName(process.Parent.User.ID) + if ok { + process.Parent.User.Name = username + } + process.Parent.Group.ID = strconv.FormatUint(uint64(egid), 10) + groupname, ok := getGroupName(process.Parent.Group.ID) + if ok { + process.Parent.Group.Name = groupname + } + process.Parent.EntityID = calculateEntityIDv1(ppid, *process.Start) +} + +func (p prvdr) fillGroupLeader(process *types.Process, pgid uint32) { + 
proc, found := p.lookupLocked(pgid) + if !found { + return + } + + start := time.Unix(0, int64(proc.Proc.TimeBoot)) + + interactive := interactiveFromTTY(types.TTYDev{ + Major: proc.Proc.TtyMajor, + Minor: proc.Proc.TtyMinor, + }) + euid := proc.Proc.Euid + egid := proc.Proc.Egid + process.GroupLeader.PID = proc.Pid + process.GroupLeader.Start = &start + process.GroupLeader.Name = basename(proc.Filename) + process.GroupLeader.Executable = proc.Filename + process.GroupLeader.Args = proc.Cmdline + process.GroupLeader.WorkingDirectory = proc.Cwd + process.GroupLeader.Interactive = &interactive + process.GroupLeader.User.ID = strconv.FormatUint(uint64(euid), 10) + username, ok := getUserName(process.GroupLeader.User.ID) + if ok { + process.GroupLeader.User.Name = username + } + process.GroupLeader.Group.ID = strconv.FormatUint(uint64(egid), 10) + groupname, ok := getGroupName(process.GroupLeader.Group.ID) + if ok { + process.GroupLeader.Group.Name = groupname + } + process.GroupLeader.EntityID = calculateEntityIDv1(pgid, *process.GroupLeader.Start) +} + +func (p prvdr) fillSessionLeader(process *types.Process, sid uint32) { + proc, found := p.lookupLocked(sid) + if !found { + return + } + + start := time.Unix(0, int64(proc.Proc.TimeBoot)) + + interactive := interactiveFromTTY(types.TTYDev{ + Major: proc.Proc.TtyMajor, + Minor: proc.Proc.TtyMinor, + }) + euid := proc.Proc.Euid + egid := proc.Proc.Egid + process.SessionLeader.PID = proc.Pid + process.SessionLeader.Start = &start + process.SessionLeader.Name = basename(proc.Filename) + process.SessionLeader.Executable = proc.Filename + process.SessionLeader.Args = proc.Cmdline + process.SessionLeader.WorkingDirectory = proc.Cwd + process.SessionLeader.Interactive = &interactive + process.SessionLeader.User.ID = strconv.FormatUint(uint64(euid), 10) + username, ok := getUserName(process.SessionLeader.User.ID) + if ok { + process.SessionLeader.User.Name = username + } + process.SessionLeader.Group.ID = 
strconv.FormatUint(uint64(egid), 10) + groupname, ok := getGroupName(process.SessionLeader.Group.ID) + if ok { + process.SessionLeader.Group.Name = groupname + } + process.SessionLeader.EntityID = calculateEntityIDv1(sid, *process.SessionLeader.Start) +} + +func (p prvdr) fillEntryLeader(process *types.Process, elid uint32) { + proc, found := p.lookupLocked(elid) + if !found { + return + } + + start := time.Unix(0, int64(proc.Proc.TimeBoot)) + + interactive := interactiveFromTTY(types.TTYDev{ + Major: proc.Proc.TtyMajor, + Minor: proc.Proc.TtyMinor, + }) + + euid := proc.Proc.Euid + egid := proc.Proc.Egid + process.EntryLeader.PID = proc.Pid + process.EntryLeader.Start = &start + process.EntryLeader.WorkingDirectory = proc.Cwd + process.EntryLeader.Interactive = &interactive + process.EntryLeader.User.ID = strconv.FormatUint(uint64(euid), 10) + username, ok := getUserName(process.EntryLeader.User.ID) + if ok { + process.EntryLeader.User.Name = username + } + process.EntryLeader.Group.ID = strconv.FormatUint(uint64(egid), 10) + groupname, ok := getGroupName(process.EntryLeader.Group.ID) + if ok { + process.EntryLeader.Group.Name = groupname + } + + process.EntryLeader.EntityID = calculateEntityIDv1(elid, *process.EntryLeader.Start) + process.EntryLeader.EntryMeta.Type = getEntryTypeName(proc.Proc.EntryLeaderType) +} + +func setEntityID(process *types.Process) { + if process.PID != 0 && process.Start != nil { + process.EntityID = calculateEntityIDv1(process.PID, *process.Start) + } + + if process.Parent.PID != 0 && process.Parent.Start != nil { + process.Parent.EntityID = calculateEntityIDv1(process.Parent.PID, *process.Parent.Start) + } + + if process.GroupLeader.PID != 0 && process.GroupLeader.Start != nil { + process.GroupLeader.EntityID = calculateEntityIDv1(process.GroupLeader.PID, *process.GroupLeader.Start) + } + + if process.SessionLeader.PID != 0 && process.SessionLeader.Start != nil { + process.SessionLeader.EntityID = 
calculateEntityIDv1(process.SessionLeader.PID, *process.SessionLeader.Start) + } + + if process.EntryLeader.PID != 0 && process.EntryLeader.Start != nil { + process.EntryLeader.EntityID = calculateEntityIDv1(process.EntryLeader.PID, *process.EntryLeader.Start) + } +} + +func setSameAsProcess(process *types.Process) { + if process.GroupLeader.PID != 0 && process.GroupLeader.Start != nil { + sameAsProcess := process.PID == process.GroupLeader.PID + process.GroupLeader.SameAsProcess = &sameAsProcess + } + + if process.SessionLeader.PID != 0 && process.SessionLeader.Start != nil { + sameAsProcess := process.PID == process.SessionLeader.PID + process.SessionLeader.SameAsProcess = &sameAsProcess + } + + if process.EntryLeader.PID != 0 && process.EntryLeader.Start != nil { + sameAsProcess := process.PID == process.EntryLeader.PID + process.EntryLeader.SameAsProcess = &sameAsProcess + } +} + +func interactiveFromTTY(tty types.TTYDev) bool { + return TTYUnknown != getTTYType(tty.Major, tty.Minor) +} + +func getTTYType(major uint32, minor uint32) TTYType { + if major >= ptsMinMajor && major <= ptsMaxMajor { + return Pts + } + + if ttyMajor == major { + if minor <= consoleMaxMinor { + return TTYConsole + } else if minor > consoleMaxMinor && minor <= ttyMaxMinor { + return TTY + } + } + + return TTYUnknown +} + +func calculateEntityIDv1(pid uint32, startTime time.Time) string { + return base64.StdEncoding.EncodeToString( + []byte( + fmt.Sprintf("%d__%s__%d__%d", + pidNsInode, + bootID, + uint64(pid), + uint64(startTime.Unix()), + ), + ), + ) +} + +// `path.Base` returns a '.' 
for empty strings, this just special cases that +// situation to return an empty string +func basename(pathStr string) string { + if pathStr == "" { + return "" + } + + return filepath.Base(pathStr) +} + +// getUserName will return the name associated with the user ID, if it exists +func getUserName(id string) (string, bool) { + user, err := user.LookupId(id) + if err != nil { + return "", false + } + return user.Username, true +} + +// getGroupName will return the name associated with the group ID, if it exists +func getGroupName(id string) (string, bool) { + group, err := user.LookupGroupId(id) + if err != nil { + return "", false + } + return group.Name, true +} + +func getEntryTypeName(entryType uint32) string { + switch int(entryType) { + case quark.QUARK_ELT_INIT: + return Init + case quark.QUARK_ELT_SSHD: + return Sshd + case quark.QUARK_ELT_SSM: + return Ssm + case quark.QUARK_ELT_CONTAINER: + return Container + case quark.QUARK_ELT_TERM: + return Terminal + case quark.QUARK_ELT_CONSOLE: + return EntryConsole + case quark.QUARK_ELT_KTHREAD: + return Kthread + default: + return "unknown" + } +} diff --git a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_other.go b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_other.go new file mode 100644 index 000000000000..e895a696747d --- /dev/null +++ b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_other.go @@ -0,0 +1,31 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build linux && !((amd64 || arm64) && cgo) + +package kerneltracingprovider + +import ( + "context" + "fmt" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" + "github.com/elastic/elastic-agent-libs/logp" +) + +type prvdr struct{} + +func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, error) { + return prvdr{}, fmt.Errorf("build type not supported, cgo required") +} + +func (p prvdr) SyncDB(event *beat.Event, pid uint32) error { + return fmt.Errorf("build type not supported") +} + +func (p prvdr) GetProcess(pid uint32) (*types.Process, error) { + return nil, fmt.Errorf("build type not supported") +} diff --git a/x-pack/auditbeat/processors/sessionmd/provider/procfs_provider/procfs_provider.go b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go similarity index 78% rename from x-pack/auditbeat/processors/sessionmd/provider/procfs_provider/procfs_provider.go rename to x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go index 4380bc2ccae4..4934a79fc52c 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/procfs_provider/procfs_provider.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go @@ -4,7 +4,7 @@ //go:build linux -package procfs_provider +package procfsprovider import ( "context" @@ -40,8 +40,12 @@ func NewProvider(ctx context.Context, logger *logp.Logger, db *processdb.DB, rea }, nil } +func (p prvdr) GetProcess(pid uint32) (*types.Process, error) { + return nil, fmt.Errorf("not implemented") +} + // SyncDB will update the process DB with process info from procfs or the event itself -func (s prvdr) SyncDB(ev *beat.Event, pid uint32) error { +func (p prvdr) SyncDB(ev *beat.Event, pid uint32) error { syscall, err := ev.GetValue(syscallField) if err != nil { return 
fmt.Errorf("event not supported, no syscall data") @@ -50,17 +54,17 @@ func (s prvdr) SyncDB(ev *beat.Event, pid uint32) error { switch syscall { case "execveat", "execve": pe := types.ProcessExecEvent{} - proc_info, err := s.reader.GetProcess(pid) + procInfo, err := p.reader.GetProcess(pid) if err == nil { - pe.PIDs = proc_info.PIDs - pe.Creds = proc_info.Creds - pe.CTTY = proc_info.CTTY - pe.CWD = proc_info.Cwd - pe.Argv = proc_info.Argv - pe.Env = proc_info.Env - pe.Filename = proc_info.Filename + pe.PIDs = procInfo.PIDs + pe.Creds = procInfo.Creds + pe.CTTY = procInfo.CTTY + pe.CWD = procInfo.Cwd + pe.Argv = procInfo.Argv + pe.Env = procInfo.Env + pe.Filename = procInfo.Filename } else { - s.logger.Warnf("couldn't get process info from proc for pid %v: %v", pid, err) + p.logger.Warnw("couldn't get process info from proc for pid", "pid", pid, "error", err) // If process info couldn't be taken from procfs, populate with as much info as // possible from the event pe.PIDs.Tgid = pid @@ -77,7 +81,7 @@ func (s prvdr) SyncDB(ev *beat.Event, pid uint32) error { } pe.PIDs.Ppid = uint32(i) - parent, err = s.db.GetProcess(pe.PIDs.Ppid) + parent, err = p.db.GetProcess(pe.PIDs.Ppid) if err != nil { goto out } @@ -87,10 +91,14 @@ func (s prvdr) SyncDB(ev *beat.Event, pid uint32) error { if err != nil { goto out } - pe.CWD = intr.(string) + if str, ok := intr.(string); ok { + pe.CWD = str + } else { + goto out + } out: } - s.db.InsertExec(pe) + p.db.InsertExec(pe) if err != nil { return fmt.Errorf("insert exec to db: %w", err) } @@ -100,7 +108,7 @@ func (s prvdr) SyncDB(ev *beat.Event, pid uint32) error { Tgid: pid, }, } - s.db.InsertExit(pe) + p.db.InsertExit(pe) case "setsid": intr, err := ev.Fields.GetValue("auditd.result") if err != nil { @@ -117,7 +125,7 @@ func (s prvdr) SyncDB(ev *beat.Event, pid uint32) error { Sid: pid, }, } - s.db.InsertSetsid(setsid_ev) + p.db.InsertSetsid(setsid_ev) } } return nil diff --git 
a/x-pack/auditbeat/processors/sessionmd/provider/procfs_provider/procfs_provider_test.go b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go similarity index 99% rename from x-pack/auditbeat/processors/sessionmd/provider/procfs_provider/procfs_provider_test.go rename to x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go index 455cb3c0433a..42f19f488ce4 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/procfs_provider/procfs_provider_test.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go @@ -4,7 +4,7 @@ //go:build linux -package procfs_provider +package procfsprovider import ( "context" diff --git a/x-pack/auditbeat/processors/sessionmd/provider/provider.go b/x-pack/auditbeat/processors/sessionmd/provider/provider.go index e95da3ec2006..4ac9530cfeaa 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/provider.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/provider.go @@ -8,9 +8,11 @@ package provider import ( "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" ) // SyncDB should ensure the DB is in a state to handle the event before returning. 
type Provider interface { SyncDB(event *beat.Event, pid uint32) error + GetProcess(pid uint32) (*types.Process, error) } diff --git a/x-pack/auditbeat/processors/sessionmd/types/events.go b/x-pack/auditbeat/processors/sessionmd/types/events.go index 5f8d67d763f1..857ab8fa2c10 100644 --- a/x-pack/auditbeat/processors/sessionmd/types/events.go +++ b/x-pack/auditbeat/processors/sessionmd/types/events.go @@ -60,8 +60,8 @@ type TTYTermios struct { } type TTYDev struct { - Minor uint16 - Major uint16 + Minor uint32 + Major uint32 Winsize TTYWinsize Termios TTYTermios } diff --git a/x-pack/auditbeat/processors/sessionmd/types/process.go b/x-pack/auditbeat/processors/sessionmd/types/process.go index 8f52a9c5aa59..a437f35310f3 100644 --- a/x-pack/auditbeat/processors/sessionmd/types/process.go +++ b/x-pack/auditbeat/processors/sessionmd/types/process.go @@ -448,6 +448,9 @@ func (p *Process) ToMap() mapstr.M { if p.EntryLeader.Start != nil { process.Put("entry_leader.start", p.EntryLeader.Start) } + if p.End != nil { + process.Put("end", p.End) + } return process } From 10a2e9437436d9c403ae70aa7e2712fd0cf512bb Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Wed, 16 Oct 2024 16:55:47 +1030 Subject: [PATCH 41/90] x-pack/filebeat/input/entityanalytics/okta/internal: add role and factor client calls (#41044) --- CHANGELOG-developer.next.asciidoc | 1 + .../provider/okta/internal/okta/okta.go | 98 ++++++++++++++++++- .../provider/okta/internal/okta/okta_test.go | 50 ++++++++++ 3 files changed, 147 insertions(+), 2 deletions(-) diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 2c1c772c1d81..791f11384c0b 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -209,6 +209,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Simplified Azure Blob Storage input state checkpoint calculation logic. {issue}40674[40674] {pull}40936[40936] - Add field redaction package. 
{pull}40997[40997] - Add support for marked redaction to x-pack/filebeat/input/internal/private {pull}41212[41212] +- Add support for collecting Okta role and factor data for users with filebeat entityanalytics input. {pull}41044[41044] ==== Deprecated diff --git a/x-pack/filebeat/input/entityanalytics/provider/okta/internal/okta/okta.go b/x-pack/filebeat/input/entityanalytics/provider/okta/internal/okta/okta.go index ef574ef4d26a..3d8bdae11c97 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/okta/internal/okta/okta.go +++ b/x-pack/filebeat/input/entityanalytics/provider/okta/internal/okta/okta.go @@ -44,7 +44,7 @@ type User struct { Profile map[string]any `json:"profile"` Credentials *Credentials `json:"credentials,omitempty"` Links HAL `json:"_links,omitempty"` // See https://developer.okta.com/docs/reference/api/users/#links-object for details. - Embedded HAL `json:"_embedded,omitempty"` + Embedded map[string]any `json:"_embedded,omitempty"` } // Credentials is a redacted Okta user's credential details. Only the credential provider is retained. @@ -72,6 +72,37 @@ type Group struct { Profile map[string]any `json:"profile"` } +// Factor is an Okta identity factor description. +// +// See https://developer.okta.com/docs/api/openapi/okta-management/management/tag/UserFactor/#tag/UserFactor/operation/listFactors. +type Factor struct { + ID string `json:"id"` + FactorType string `json:"factorType"` + Provider string `json:"provider"` + VendorName string `json:"vendorName"` + Status string `json:"status"` + Created time.Time `json:"created"` + LastUpdated time.Time `json:"lastUpdated"` + Profile map[string]any `json:"profile"` + Links HAL `json:"_links,omitempty"` + Embedded map[string]any `json:"_embedded,omitempty"` +} + +// Role is an Okta user role description. 
+// +// See https://developer.okta.com/docs/api/openapi/okta-management/management/tag/RoleAssignmentAUser/#tag/RoleAssignmentAUser/operation/listAssignedRolesForUser +// and https://developer.okta.com/docs/api/openapi/okta-management/management/tag/RoleAssignmentBGroup/#tag/RoleAssignmentBGroup/operation/listGroupAssignedRoles. +type Role struct { + ID string `json:"id"` + Label string `json:"label"` + Type string `json:"type"` + Status string `json:"status"` + Created time.Time `json:"created"` + LastUpdated time.Time `json:"lastUpdated"` + AssignmentType string `json:"assignmentType"` + Links HAL `json:"_links"` +} + // Device is an Okta device's details. // // See https://developer.okta.com/docs/api/openapi/okta-management/management/tag/Device/#tag/Device/operation/listDevices for details @@ -176,6 +207,48 @@ func GetUserDetails(ctx context.Context, cli *http.Client, host, key, user strin return getDetails[User](ctx, cli, u, key, user == "", omit, lim, window, log) } +// GetUserFactors returns Okta group roles using the groups API endpoint. host is the +// Okta user domain and key is the API token to use for the query. group must not be empty. +// +// See GetUserDetails for details of the query and rate limit parameters. +// +// See https://developer.okta.com/docs/api/openapi/okta-management/management/tag/UserFactor/#tag/UserFactor/operation/listFactors. +func GetUserFactors(ctx context.Context, cli *http.Client, host, key, user string, lim *rate.Limiter, window time.Duration, log *logp.Logger) ([]Factor, http.Header, error) { + const endpoint = "/api/v1/users" + + if user == "" { + return nil, nil, errors.New("no user specified") + } + + u := &url.URL{ + Scheme: "https", + Host: host, + Path: path.Join(endpoint, user, "factors"), + } + return getDetails[Factor](ctx, cli, u, key, true, OmitNone, lim, window, log) +} + +// GetUserRoles returns Okta group roles using the groups API endpoint. 
host is the +// Okta user domain and key is the API token to use for the query. group must not be empty. +// +// See GetUserDetails for details of the query and rate limit parameters. +// +// See https://developer.okta.com/docs/api/openapi/okta-management/management/tag/RoleAssignmentBGroup/#tag/RoleAssignmentBGroup/operation/listGroupAssignedRoles. +func GetUserRoles(ctx context.Context, cli *http.Client, host, key, user string, lim *rate.Limiter, window time.Duration, log *logp.Logger) ([]Role, http.Header, error) { + const endpoint = "/api/v1/users" + + if user == "" { + return nil, nil, errors.New("no user specified") + } + + u := &url.URL{ + Scheme: "https", + Host: host, + Path: path.Join(endpoint, user, "roles"), + } + return getDetails[Role](ctx, cli, u, key, true, OmitNone, lim, window, log) +} + // GetUserGroupDetails returns Okta group details using the users API endpoint. host is the // Okta user domain and key is the API token to use for the query. user must not be empty. // @@ -197,6 +270,27 @@ func GetUserGroupDetails(ctx context.Context, cli *http.Client, host, key, user return getDetails[Group](ctx, cli, u, key, true, OmitNone, lim, window, log) } +// GetGroupRoles returns Okta group roles using the groups API endpoint. host is the +// Okta user domain and key is the API token to use for the query. group must not be empty. +// +// See GetUserDetails for details of the query and rate limit parameters. +// +// See https://developer.okta.com/docs/api/openapi/okta-management/management/tag/RoleAssignmentBGroup/#tag/RoleAssignmentBGroup/operation/listGroupAssignedRoles. 
+func GetGroupRoles(ctx context.Context, cli *http.Client, host, key, group string, lim *rate.Limiter, window time.Duration, log *logp.Logger) ([]Role, http.Header, error) { + const endpoint = "/api/v1/groups" + + if group == "" { + return nil, nil, errors.New("no group specified") + } + + u := &url.URL{ + Scheme: "https", + Host: host, + Path: path.Join(endpoint, group, "roles"), + } + return getDetails[Role](ctx, cli, u, key, true, OmitNone, lim, window, log) +} + // GetDeviceDetails returns Okta device details using the list devices API endpoint. host is the // Okta user domain and key is the API token to use for the query. If device is not empty, // details for the specific device are returned, otherwise a list of all devices is returned. @@ -250,7 +344,7 @@ func GetDeviceUsers(ctx context.Context, cli *http.Client, host, key, device str // entity is an Okta entity analytics entity. type entity interface { - User | Group | Device | devUser + User | Group | Role | Factor | Device | devUser } type devUser struct { diff --git a/x-pack/filebeat/input/entityanalytics/provider/okta/internal/okta/okta_test.go b/x-pack/filebeat/input/entityanalytics/provider/okta/internal/okta/okta_test.go index 2ce439252210..9b04d3996bf9 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/okta/internal/okta/okta_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/okta/internal/okta/okta_test.go @@ -116,6 +116,56 @@ func Test(t *testing.T) { t.Logf("groups: %s", b) }) + t.Run("my_roles", func(t *testing.T) { + query := make(url.Values) + query.Set("limit", "200") + roles, _, err := GetUserRoles(context.Background(), http.DefaultClient, host, key, me.ID, limiter, window, logger) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(roles) == 0 { + t.Fatalf("unexpected len(roles): got:%d want>0", len(roles)) + } + + if omit&OmitCredentials != 0 && me.Credentials != nil { + t.Errorf("unexpected credentials with %s: %#v", omit, me.Credentials) + } + + if 
!*logResponses {
+			return
+		}
+		b, err := json.Marshal(roles)
+		if err != nil {
+			t.Errorf("failed to marshal roles for logging: %v", err)
+		}
+		t.Logf("roles: %s", b)
+	})
+
+	t.Run("my_factors", func(t *testing.T) {
+		query := make(url.Values)
+		query.Set("limit", "200")
+		factors, _, err := GetUserFactors(context.Background(), http.DefaultClient, host, key, me.ID, limiter, window, logger)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		if len(factors) == 0 {
+			t.Fatalf("unexpected len(factors): got:%d want>0", len(factors))
+		}
+
+		if omit&OmitCredentials != 0 && me.Credentials != nil {
+			t.Errorf("unexpected credentials with %s: %#v", omit, me.Credentials)
+		}
+
+		if !*logResponses {
+			return
+		}
+		b, err := json.Marshal(factors)
+		if err != nil {
+			t.Errorf("failed to marshal factors for logging: %v", err)
+		}
+		t.Logf("factors: %s", b)
+	})
+
 	t.Run("user", func(t *testing.T) {
 		login, _ := me.Profile["login"].(string)
 		if login == "" {

From e2815a49c107e3fa1cd999e0689c4a10881318bb Mon Sep 17 00:00:00 2001
From: Tiago Queiroz
Date: Wed, 16 Oct 2024 08:24:25 -0400
Subject: [PATCH 42/90] Correctly set 'input.type' when using the system
 integration (#41246)

The system integration now uses the new input system-logs that is just
a proxy to instantiate either the log input or journald. That caused
the 'input.type' in the final event to contain 'system-logs' instead
of 'log' or 'journald'.

This PR fixes it by adding a set step in the ingest pipeline. The
ingest pipeline is used because Beats sets this field outside of the
input code.
Co-authored-by: Pierre HILBERT
---
 CHANGELOG-developer.next.asciidoc             |   1 +
 filebeat/input/systemlogs/input.go            |   8 +
 filebeat/module/system/auth/ingest/files.yml  |   3 +
 .../module/system/auth/ingest/journald.yml    |   3 +
 .../test/auth-ubuntu1204.log-expected.json    | 200 +++++++++---------
 .../auth/test/debian-12.journal-expected.json |  18 +-
 .../auth/test/secure-rhel7.log-expected.json  | 200 +++++++++---------
 .../system/auth/test/test.log-expected.json   |  22 +-
 .../auth/test/timestamp.log-expected.json     |   4 +-
 .../module/system/syslog/ingest/files.yml     |   3 +
 .../module/system/syslog/ingest/journald.yml  |   3 +
 .../darwin-syslog-sample.log-expected.json    |   6 +-
 .../test/darwin-syslog.log-expected.json      | 200 +++++++++---------
 .../test/debian-12.journal-expected.json      |   6 +-
 .../syslog/test/suse-syslog.log-expected.json |   4 +-
 .../syslog/test/tz-offset.log-expected.json   |   6 +-
 16 files changed, 354 insertions(+), 333 deletions(-)

diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc
index 791f11384c0b..01a7205e713c 100644
--- a/CHANGELOG-developer.next.asciidoc
+++ b/CHANGELOG-developer.next.asciidoc
@@ -70,6 +70,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only.
 - Debug log entries from the acker (`stateful ack ...` or `stateless ack ...`) removed. {pull}39672[39672]
 - Rename x-pack/filebeat websocket input to streaming. {issue}40264[40264] {pull}40421[40421]
 - Journald input now calls `journalctl` instead of using `github.com/coreos/go-systemd/v22@v22.5.0/sdjournal`, the CGO dependency has been removed from Filebeat {pull}40061[40061]
+- System module events now contain `input.type: system-logs` instead of `input.type: log` when harvesting log files, however the ingest pipeline sets it back to the original input (log or journald).
{pull}41246[41246] ==== Bugfixes diff --git a/filebeat/input/systemlogs/input.go b/filebeat/input/systemlogs/input.go index 789fd65ad5d9..05cd541d81c1 100644 --- a/filebeat/input/systemlogs/input.go +++ b/filebeat/input/systemlogs/input.go @@ -115,16 +115,20 @@ func PluginV2(logger *logp.Logger, store cursor.StateStore) v2.Plugin { // return false // - Otherwise return true func useJournald(c *conf.C) (bool, error) { + logger := logp.L().Named("input.system-logs") + cfg := config{} if err := c.Unpack(&cfg); err != nil { return false, nil } if cfg.UseJournald { + logger.Info("using journald input because 'use_journald' is set") return true, nil } if cfg.UseFiles { + logger.Info("using log input because 'use_files' is set") return false, nil } @@ -144,6 +148,9 @@ func useJournald(c *conf.C) (bool, error) { if len(paths) != 0 { // We found at least one system log file, // journald will not be used, return early + logger.Info( + "using log input because file(s) was(were) found when testing glob '%s'", + g) return false, nil } } @@ -230,5 +237,6 @@ func toFilesConfig(cfg *conf.C) (*conf.C, error) { if err := cfg.SetString("type", -1, pluginName); err != nil { return nil, fmt.Errorf("cannot set type back to '%s': %w", pluginName, err) } + return newCfg, nil } diff --git a/filebeat/module/system/auth/ingest/files.yml b/filebeat/module/system/auth/ingest/files.yml index fbeebc12b7e2..557747b6400a 100644 --- a/filebeat/module/system/auth/ingest/files.yml +++ b/filebeat/module/system/auth/ingest/files.yml @@ -54,6 +54,9 @@ processors: value: '{{{ _ingest.on_failure_message }}}' - remove: field: system.auth.timestamp + - set: + field: input.type + value: log on_failure: - set: field: error.message diff --git a/filebeat/module/system/auth/ingest/journald.yml b/filebeat/module/system/auth/ingest/journald.yml index aee3f5263ede..bb43dd63cf59 100644 --- a/filebeat/module/system/auth/ingest/journald.yml +++ b/filebeat/module/system/auth/ingest/journald.yml @@ -24,6 +24,9 @@ 
processors: - syslog - systemd - message_id + - set: + field: input.type + value: journald on_failure: - set: field: error.message diff --git a/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json b/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json index 6e2ffbeaa514..ee5afe3f2356 100644 --- a/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json +++ b/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json @@ -6,7 +6,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 0, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -27,7 +27,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 81, "process.name": "sudo", "related.hosts": [ @@ -52,7 +52,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 464, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -75,7 +75,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 570, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -95,7 +95,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 655, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -116,7 +116,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 736, "process.name": "sudo", "related.hosts": [ @@ -141,7 +141,7 @@ "event.timezone": "-02:00", 
"fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1121, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -164,7 +164,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1227, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -184,7 +184,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1312, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -205,7 +205,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1393, "process.name": "sudo", "related.hosts": [ @@ -230,7 +230,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1776, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -253,7 +253,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1882, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -273,7 +273,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1967, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -294,7 +294,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2048, "process.name": "sudo", 
"related.hosts": [ @@ -319,7 +319,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2426, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -342,7 +342,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2532, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -362,7 +362,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2617, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -383,7 +383,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2698, "process.name": "sudo", "related.hosts": [ @@ -408,7 +408,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3083, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -431,7 +431,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3189, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -451,7 +451,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3274, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -471,7 +471,7 @@ "event.module": "system", "event.timezone": "-02:00", "fileset.name": "auth", - "input.type": "system-logs", + 
"input.type": "log", "log.offset": 3355, "message": "last message repeated 2 times", "process.name": "sshd", @@ -485,7 +485,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3414, "process.name": "sudo", "related.hosts": [ @@ -510,7 +510,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3977, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -533,7 +533,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4083, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -553,7 +553,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4168, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -574,7 +574,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4249, "process.name": "sudo", "related.hosts": [ @@ -599,7 +599,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4632, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -622,7 +622,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4738, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -642,7 +642,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", 
"host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4823, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -663,7 +663,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4904, "process.name": "sudo", "related.hosts": [ @@ -688,7 +688,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5289, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -711,7 +711,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5395, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -731,7 +731,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5480, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -752,7 +752,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5561, "process.name": "sudo", "related.hosts": [ @@ -777,7 +777,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5942, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -800,7 +800,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6048, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -820,7 
+820,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6133, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -841,7 +841,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6214, "process.name": "sudo", "related.hosts": [ @@ -866,7 +866,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6597, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -889,7 +889,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6703, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -909,7 +909,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6788, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -930,7 +930,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6869, "process.name": "sudo", "related.hosts": [ @@ -955,7 +955,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7254, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -978,7 +978,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7360, "message": "pam_unix(sudo:session): 
session closed for user root", "process.name": "sudo", @@ -998,7 +998,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7445, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1019,7 +1019,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7526, "process.name": "sudo", "related.hosts": [ @@ -1044,7 +1044,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7911, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1067,7 +1067,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8017, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1087,7 +1087,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8102, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1108,7 +1108,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8183, "process.name": "sudo", "related.hosts": [ @@ -1133,7 +1133,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8564, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1156,7 +1156,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + 
"input.type": "log", "log.offset": 8670, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1176,7 +1176,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8755, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1197,7 +1197,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8836, "process.name": "sudo", "related.hosts": [ @@ -1222,7 +1222,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9215, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1245,7 +1245,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9321, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1265,7 +1265,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9406, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1286,7 +1286,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9487, "process.name": "sudo", "related.hosts": [ @@ -1311,7 +1311,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9869, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1334,7 +1334,7 @@ "event.timezone": "-02:00", 
"fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9975, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1354,7 +1354,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10060, "process.name": "sudo", "related.hosts": [ @@ -1379,7 +1379,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11099, "message": "vagrant : (command continued) '/etc/metricbeat/metricbeat.yml)", "process.name": "sudo", @@ -1395,7 +1395,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11195, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1418,7 +1418,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11301, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1438,7 +1438,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11386, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1459,7 +1459,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11467, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1480,7 +1480,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11548, "process.name": 
"sudo", "related.hosts": [ @@ -1505,7 +1505,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11928, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1528,7 +1528,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12034, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1548,7 +1548,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12119, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1569,7 +1569,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12200, "process.name": "sudo", "related.hosts": [ @@ -1594,7 +1594,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12583, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1617,7 +1617,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12689, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1637,7 +1637,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12774, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1658,7 +1658,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - 
"input.type": "system-logs", + "input.type": "log", "log.offset": 12855, "process.name": "sudo", "related.hosts": [ @@ -1683,7 +1683,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 13241, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1706,7 +1706,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 13347, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1726,7 +1726,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 13432, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1747,7 +1747,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 13513, "process.name": "sudo", "related.hosts": [ @@ -1772,7 +1772,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 13898, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1795,7 +1795,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14004, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1815,7 +1815,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14089, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1836,7 +1836,7 
@@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14170, "process.name": "sudo", "related.hosts": [ @@ -1861,7 +1861,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14549, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1884,7 +1884,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14655, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1904,7 +1904,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14740, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -1925,7 +1925,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14821, "process.name": "sudo", "related.hosts": [ @@ -1950,7 +1950,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15203, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -1973,7 +1973,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15309, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -1993,7 +1993,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15394, "message": 
"subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -2014,7 +2014,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15475, "process.name": "sudo", "related.hosts": [ @@ -2039,7 +2039,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15860, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -2062,7 +2062,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15966, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -2082,7 +2082,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16051, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -2103,7 +2103,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16132, "process.name": "sudo", "related.hosts": [ @@ -2128,7 +2128,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16517, "message": "pam_unix(sudo:session): session opened for user root by vagrant(uid=1000)", "process.name": "sudo", @@ -2151,7 +2151,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16623, "message": "pam_unix(sudo:session): session closed for user root", "process.name": "sudo", @@ -2171,7 +2171,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", 
- "input.type": "system-logs", + "input.type": "log", "log.offset": 16708, "message": "subsystem request for sftp by user vagrant", "process.name": "sshd", @@ -2192,7 +2192,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16789, "process.name": "sudo", "related.hosts": [ diff --git a/filebeat/module/system/auth/test/debian-12.journal-expected.json b/filebeat/module/system/auth/test/debian-12.journal-expected.json index ee0d8a69ba02..2ef69b76b22a 100644 --- a/filebeat/module/system/auth/test/debian-12.journal-expected.json +++ b/filebeat/module/system/auth/test/debian-12.journal-expected.json @@ -16,7 +16,7 @@ "fileset.name": "auth", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 4, "log.syslog.priority": 6, "message": "Accepted publickey for vagrant from 10.0.2.2 port 48274 ssh2: ED25519 SHA256:k1kjhwoH/H3w31MbGOIGd7qxrkSQJnoAN0eYJVHDmmI", @@ -64,7 +64,7 @@ "fileset.name": "auth", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 4, "log.syslog.priority": 6, "message": "Accepted password for vagrant from 192.168.42.119 port 55310 ssh2", @@ -110,7 +110,7 @@ "fileset.name": "auth", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 4, "log.syslog.priority": 6, "message": "Invalid user test from 192.168.42.119 port 48890", @@ -154,7 +154,7 @@ "fileset.name": "auth", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 4, "log.syslog.priority": 6, "message": "Failed password for root 
from 192.168.42.119 port 46632 ssh2", @@ -200,7 +200,7 @@ "fileset.name": "auth", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 4, "log.syslog.priority": 6, "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", @@ -246,7 +246,7 @@ "fileset.name": "auth", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 4, "log.syslog.priority": 6, "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", @@ -284,7 +284,7 @@ "fileset.name": "auth", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 10, "log.syslog.priority": 5, "message": " vagrant : TTY=pts/2 ; PWD=/home/vagrant ; USER=root ; COMMAND=/usr/bin/emacs /etc/ssh/sshd_config", @@ -332,7 +332,7 @@ "group.name": "test", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 10, "log.syslog.priority": 6, "message": "new group: name=test, GID=1001", @@ -362,7 +362,7 @@ "fileset.name": "auth", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 4, "log.syslog.priority": 6, "message": "Session 8 logged out. 
Waiting for processes to exit.", diff --git a/filebeat/module/system/auth/test/secure-rhel7.log-expected.json b/filebeat/module/system/auth/test/secure-rhel7.log-expected.json index 71cd8657c7bf..731b4db0423b 100644 --- a/filebeat/module/system/auth/test/secure-rhel7.log-expected.json +++ b/filebeat/module/system/auth/test/secure-rhel7.log-expected.json @@ -14,7 +14,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 0, "process.name": "sshd", "process.pid": 2738, @@ -47,7 +47,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 97, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -76,7 +76,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 209, "process.name": "sshd", "process.pid": 2738, @@ -109,7 +109,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 306, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -138,7 +138,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 418, "process.name": "sshd", "process.pid": 2738, @@ -171,7 +171,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 515, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -188,7 +188,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 618, "message": "PAM 4 more authentication 
failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -205,7 +205,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 760, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -222,7 +222,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 842, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -239,7 +239,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 993, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -268,7 +268,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1105, "process.name": "sshd", "process.pid": 2742, @@ -301,7 +301,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1202, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -330,7 +330,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1314, "process.name": "sshd", "process.pid": 2742, @@ -363,7 +363,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1411, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -392,7 +392,7 @@ ], "fileset.name": "auth", 
"host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1523, "process.name": "sshd", "process.pid": 2742, @@ -425,7 +425,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1620, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -454,7 +454,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1732, "process.name": "sshd", "process.pid": 2742, @@ -487,7 +487,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1829, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -516,7 +516,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1941, "process.name": "sshd", "process.pid": 2742, @@ -549,7 +549,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2038, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -566,7 +566,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2141, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -583,7 +583,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2283, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -600,7 +600,7 @@ 
"event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2365, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -617,7 +617,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2516, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -638,7 +638,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2628, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=216.160.83.58 user=root", "process.name": "sshd", @@ -655,7 +655,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2777, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -684,7 +684,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2889, "process.name": "sshd", "process.pid": 2754, @@ -717,7 +717,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2986, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -746,7 +746,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3098, "process.name": "sshd", "process.pid": 2758, @@ -783,7 +783,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - 
"input.type": "system-logs", + "input.type": "log", "log.offset": 3194, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -812,7 +812,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3306, "process.name": "sshd", "process.pid": 2754, @@ -845,7 +845,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3403, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -874,7 +874,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3515, "process.name": "sshd", "process.pid": 2758, @@ -911,7 +911,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3611, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -940,7 +940,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3723, "process.name": "sshd", "process.pid": 2754, @@ -973,7 +973,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3820, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1002,7 +1002,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3932, "process.name": "sshd", "process.pid": 2758, @@ -1039,7 +1039,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", 
"log.offset": 4028, "message": "Received disconnect from 216.160.83.58: 11: [preauth]", "process.name": "sshd", @@ -1056,7 +1056,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4119, "message": "PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=216.160.83.58 user=root", "process.name": "sshd", @@ -1081,7 +1081,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4259, "process.name": "sshd", "process.pid": 2754, @@ -1114,7 +1114,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4356, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1143,7 +1143,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4468, "process.name": "sshd", "process.pid": 2754, @@ -1176,7 +1176,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4565, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -1193,7 +1193,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4668, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1210,7 +1210,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4810, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -1227,7 +1227,7 @@ 
"event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 4892, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1244,7 +1244,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5043, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1273,7 +1273,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5155, "process.name": "sshd", "process.pid": 2762, @@ -1306,7 +1306,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5252, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1335,7 +1335,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5364, "process.name": "sshd", "process.pid": 2762, @@ -1368,7 +1368,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5461, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1397,7 +1397,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5573, "process.name": "sshd", "process.pid": 2762, @@ -1430,7 +1430,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5670, "message": "pam_succeed_if(sshd:auth): 
requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1459,7 +1459,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5782, "process.name": "sshd", "process.pid": 2762, @@ -1492,7 +1492,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5879, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1521,7 +1521,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5991, "process.name": "sshd", "process.pid": 2762, @@ -1554,7 +1554,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6088, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -1571,7 +1571,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6191, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1588,7 +1588,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6333, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -1605,7 +1605,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6415, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1622,7 +1622,7 @@ "event.timezone": 
"-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6566, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1651,7 +1651,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6678, "process.name": "sshd", "process.pid": 2766, @@ -1684,7 +1684,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6775, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1713,7 +1713,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6887, "process.name": "sshd", "process.pid": 2766, @@ -1746,7 +1746,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6984, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1775,7 +1775,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7096, "process.name": "sshd", "process.pid": 2766, @@ -1808,7 +1808,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7193, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1837,7 +1837,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7305, "process.name": "sshd", "process.pid": 2766, @@ -1870,7 +1870,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", 
"host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7402, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -1899,7 +1899,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7514, "process.name": "sshd", "process.pid": 2766, @@ -1932,7 +1932,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7611, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -1949,7 +1949,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7714, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -1966,7 +1966,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7856, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -1983,7 +1983,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7938, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=216.160.83.58 user=root", "process.name": "sshd", @@ -2000,7 +2000,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8087, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2029,7 +2029,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": 
"system-logs", + "input.type": "log", "log.offset": 8199, "process.name": "sshd", "process.pid": 2778, @@ -2066,7 +2066,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8295, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2095,7 +2095,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8407, "process.name": "sshd", "process.pid": 2778, @@ -2132,7 +2132,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8503, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2161,7 +2161,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8615, "process.name": "sshd", "process.pid": 2778, @@ -2198,7 +2198,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8711, "message": "Received disconnect from 216.160.83.58: 11: [preauth]", "process.name": "sshd", @@ -2215,7 +2215,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8802, "message": "PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=216.160.83.58 user=root", "process.name": "sshd", @@ -2232,7 +2232,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8942, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ 
-2249,7 +2249,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9093, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2278,7 +2278,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9205, "process.name": "sshd", "process.pid": 2785, @@ -2311,7 +2311,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9302, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2340,7 +2340,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9414, "process.name": "sshd", "process.pid": 2785, @@ -2373,7 +2373,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9511, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2402,7 +2402,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9623, "process.name": "sshd", "process.pid": 2785, @@ -2435,7 +2435,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9720, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2464,7 +2464,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9832, "process.name": "sshd", "process.pid": 2785, @@ -2497,7 +2497,7 @@ "event.timezone": 
"-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9929, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2526,7 +2526,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10041, "process.name": "sshd", "process.pid": 2785, @@ -2559,7 +2559,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10138, "message": "fatal: Read from socket failed: Connection reset by peer [preauth]", "process.name": "sshd", @@ -2576,7 +2576,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10241, "message": "PAM 4 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -2593,7 +2593,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10383, "message": "PAM service(sshd) ignoring max retries; 5 > 3", "process.name": "sshd", @@ -2610,7 +2610,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10465, "message": "pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=202.196.224.106 user=root", "process.name": "sshd", @@ -2627,7 +2627,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10616, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", @@ -2656,7 +2656,7 @@ ], "fileset.name": "auth", 
"host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10728, "process.name": "sshd", "process.pid": 2797, @@ -2689,7 +2689,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 10825, "message": "pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"", "process.name": "sshd", diff --git a/filebeat/module/system/auth/test/test.log-expected.json b/filebeat/module/system/auth/test/test.log-expected.json index bfe6a5c44b0c..16e859a66d36 100644 --- a/filebeat/module/system/auth/test/test.log-expected.json +++ b/filebeat/module/system/auth/test/test.log-expected.json @@ -15,7 +15,7 @@ ], "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.offset": 0, "process.name": "sshd", "process.pid": 3402, @@ -53,7 +53,7 @@ ], "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.offset": 152, "process.name": "sshd", "process.pid": 7483, @@ -89,7 +89,7 @@ ], "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.offset": 254, "process.name": "sshd", "process.pid": 3430, @@ -123,7 +123,7 @@ ], "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 324, "process.name": "sshd", "process.pid": 5774, @@ -160,7 +160,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.offset": 420, "process.name": "sudo", "related.hosts": [ @@ -185,7 +185,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", - "input.type": "system-logs", + "input.type": "log", "log.offset": 522, "process.name": "sshd", "process.pid": 18406, @@ -214,7 +214,7 @@ "event.timezone": "-02:00", 
"fileset.name": "auth", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.offset": 616, "process.name": "sudo", "related.hosts": [ @@ -239,7 +239,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", - "input.type": "system-logs", + "input.type": "log", "log.offset": 735, "process.name": "sudo", "related.hosts": [ @@ -275,7 +275,7 @@ "group.id": "48", "group.name": "apache", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.offset": 860, "process.name": "groupadd", "process.pid": 6991, @@ -300,7 +300,7 @@ "fileset.name": "auth", "group.id": "48", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.offset": 933, "process.name": "useradd", "process.pid": 6995, @@ -323,7 +323,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1056, "process.name": "sshd", "process.pid": 10161, diff --git a/filebeat/module/system/auth/test/timestamp.log-expected.json b/filebeat/module/system/auth/test/timestamp.log-expected.json index 52b028dd3b00..fd083732af6c 100644 --- a/filebeat/module/system/auth/test/timestamp.log-expected.json +++ b/filebeat/module/system/auth/test/timestamp.log-expected.json @@ -7,7 +7,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.file.path": "timestamp.log", "log.offset": 0, "message": "pam_unix(sudo-i:session): session opened for user root by userauth3(uid=0)", @@ -32,7 +32,7 @@ "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.file.path": "timestamp.log", "log.offset": 118, "message": "user nobody logged out.", diff --git a/filebeat/module/system/syslog/ingest/files.yml b/filebeat/module/system/syslog/ingest/files.yml index 
b1352f2ad620..f4f5930e1984 100644 --- a/filebeat/module/system/syslog/ingest/files.yml +++ b/filebeat/module/system/syslog/ingest/files.yml @@ -59,6 +59,9 @@ processors: value: "{{host.hostname}}" if: "ctx.host?.hostname != null && ctx.host?.hostname != ''" allow_duplicates: false +- set: + field: input.type + value: log on_failure: - set: field: error.message diff --git a/filebeat/module/system/syslog/ingest/journald.yml b/filebeat/module/system/syslog/ingest/journald.yml index 5d011784154a..68400c8f5071 100644 --- a/filebeat/module/system/syslog/ingest/journald.yml +++ b/filebeat/module/system/syslog/ingest/journald.yml @@ -23,6 +23,9 @@ processors: - syslog - systemd - message_id +- set: + field: input.type + value: journald on_failure: - set: field: error.message diff --git a/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json b/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json index eb8947f85c13..a5957f19b948 100644 --- a/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json +++ b/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json @@ -6,7 +6,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.flags": [ "multiline" ], @@ -26,7 +26,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 907, "message": "2016-12-13 11:35:28.421 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine updateAllExceptProduct:] KSUpdateEngine updating all installed products, except:'com.google.Keystone'.", "process.name": "GoogleSoftwareUpdateAgent", @@ -42,7 +42,7 @@ "event.module": "system", "event.timezone": "-02:00", "fileset.name": "syslog", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1176, "message": "--- last message repeated 1 time ---", 
"service.type": "system" diff --git a/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json b/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json index a1620750ff15..6f12a7a5656c 100644 --- a/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json +++ b/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json @@ -6,7 +6,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 0, "message": "2016-12-13 11:35:28.419 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp performSelfUpdateWithEngine:] Finished self update check.", "process.name": "GoogleSoftwareUpdateAgent", @@ -23,7 +23,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.flags": [ "multiline" ], @@ -43,7 +43,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1127, "message": "2016-12-13 11:35:28.421 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine updateAllExceptProduct:] KSUpdateEngine updating all installed products, except:'com.google.Keystone'.", "process.name": "GoogleSoftwareUpdateAgent", @@ -60,7 +60,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 1396, "message": "2016-12-13 11:35:28.422 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSCheckAction performAction] KSCheckAction checking 2 ticket(s).", "process.name": "GoogleSoftwareUpdateAgent", @@ -77,7 +77,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.flags": [ "multiline" ], @@ -97,7 +97,7 @@ 
"event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 2833, "message": "2016-12-13 11:35:28.446 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] +[KSCodeSigningVerification verifyBundle:applicationId:error:] KSCodeSigningVerification verifying code signing for '/Applications/Google Chrome.app' with the requirement 'anchor apple generic and certificate 1[field.1.2.840.113635.100.6.2.6] exists and certificate leaf[field.1.2.840.113635.100.6.1.13] exists and certificate leaf[subject.OU]=\"EQHXZ8M8AV\" and (identifier=\"com.google.Chrome\")'", "process.name": "GoogleSoftwareUpdateAgent", @@ -114,7 +114,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 3377, "message": "2016-12-13 11:35:29.430 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] +[KSCodeSigningVerification verifyBundle:applicationId:error:] KSCodeSigningVerification verifying code signing for '/Applications/Google Drive.app' with the requirement 'anchor apple generic and certificate 1[field.1.2.840.113635.100.6.2.6] exists and certificate leaf[field.1.2.840.113635.100.6.1.13] exists and certificate leaf[subject.OU]=\"EQHXZ8M8AV\" and (identifier=\"com.google.GoogleDrive\")'", "process.name": "GoogleSoftwareUpdateAgent", @@ -131,7 +131,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.flags": [ "multiline" ], @@ -151,7 +151,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 5675, "message": "2016-12-13 11:35:30.116 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher start fetch from 
URL: \"https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822\"", "process.name": "GoogleSoftwareUpdateAgent", @@ -168,7 +168,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6055, "message": "2016-12-13 11:35:30.117 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher(PrivateMethods) launchedHelperTaskForToolPath:error:] KSOutOfProcessFetcher launched '/Users/tsg/Library/Google/GoogleSoftwareUpdate/GoogleSoftwareUpdate.bundle/Contents/MacOS/ksfetch' with process id: 21414", "process.name": "GoogleSoftwareUpdateAgent", @@ -185,7 +185,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6436, "message": "2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher sending both request and download file location to the helper.", "process.name": "GoogleSoftwareUpdateAgent", @@ -202,7 +202,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6719, "message": "2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] KSSendAllDataToHelper() KSHelperTool wrote 2383 bytes to the helper input.", "process.name": "GoogleSoftwareUpdateAgent", @@ -219,7 +219,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 6943, "message": "2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] Closing the file handle.", "process.name": 
"GoogleSoftwareUpdateAgent", @@ -236,7 +236,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7166, "message": "2016-12-13 11:35:30.118 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher beginFetchWithDelegate:] KSOutOfProcessFetcher fetching from URL: \"https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822\"", "process.name": "GoogleSoftwareUpdateAgent", @@ -253,7 +253,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7543, "message": "2016-12-13 11:35:30.149 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] KSHelperReceiveAllData() KSHelperTool read 2383 bytes from stdin.", "process.name": "ksfetch", @@ -270,7 +270,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 7722, "message": "2016-12-13 11:35:30.151 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher received a request: { URL: https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822 }", "process.name": "ksfetch", @@ -287,7 +287,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8050, "message": "2016-12-13 11:35:30.151 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher received a download path: /tmp/KSOutOfProcessFetcher.QTqOLkktQz/download", "process.name": "ksfetch", @@ -304,7 +304,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8251, "message": "2016-12-13 
11:35:30.152 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() ksfetch fetching URL ( { URL: https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822 }) to folder:/tmp/KSOutOfProcessFetcher.QTqOLkktQz/download", "process.name": "ksfetch", @@ -321,7 +321,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8631, "message": "2016-12-13 11:35:30.152 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Setting up download file handles...", "process.name": "ksfetch", @@ -338,7 +338,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8787, "message": "2016-12-13 11:35:30.348 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] -[FetchDelegate fetcher:finishedWithData:] Fetcher downloaded successfully data of length: 0", "process.name": "ksfetch", @@ -355,7 +355,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 8993, "message": "2016-12-13 11:35:30.348 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() ksfetch done fetching.", "process.name": "ksfetch", @@ -372,7 +372,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 9136, "message": "2016-12-13 11:35:30.351 ksfetch[21414/0x7fffcc3f93c0] [lvl=2] main() Fetcher is exiting.", "process.name": "ksfetch", @@ -389,7 +389,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.flags": [ "multiline" ], @@ -409,7 +409,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": 
"system-logs", + "input.type": "log", "log.offset": 9540, "message": "2016-12-13 11:35:30.354 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOutOfProcessFetcher(PrivateMethods) helperDidTerminate:] KSOutOfProcessFetcher fetch ended for URL: \"https://tools.google.com/service/update2?cup2hreq=423332d883f010d5b10e169646ed851278047f76e6c5d4dbfa2233ef66e3b141&cup2key=6:1566315822\"", "process.name": "GoogleSoftwareUpdateAgent", @@ -426,7 +426,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.flags": [ "multiline" ], @@ -446,7 +446,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11060, "message": "2016-12-13 11:35:30.356 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSOmahaServer updateInfosForUpdateResponse:updateRequest:infoStore:upToDateTickets:updatedTickets:events:errors:] Response passed CUP validation.", "process.name": "GoogleSoftwareUpdateAgent", @@ -463,7 +463,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11357, "message": "2016-12-13 11:35:30.381 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateCheckAction(PrivateMethods) finishAction] KSUpdateCheckAction found updates: {( )}", "process.name": "GoogleSoftwareUpdateAgent", @@ -480,7 +480,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11599, "message": "2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSPrefetchAction performAction] KSPrefetchAction no updates to prefetch.", "process.name": "GoogleSoftwareUpdateAgent", @@ -497,7 +497,7 @@ "event.timezone": "-02:00", "fileset.name": 
"syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 11823, "message": "2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSMultiUpdateAction performAction] KSSilentUpdateAction had no updates to apply.", "process.name": "GoogleSoftwareUpdateAgent", @@ -514,7 +514,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12055, "message": "2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSMultiUpdateAction performAction] KSPromptAction had no updates to apply.", "process.name": "GoogleSoftwareUpdateAgent", @@ -531,7 +531,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12281, "message": "2016-12-13 11:35:30.384 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp(KeystoneDelegate) updateEngineFinishedWithErrors:] Keystone finished: errors=0", "process.name": "GoogleSoftwareUpdateAgent", @@ -548,7 +548,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 12522, "message": "2016-12-13 11:35:30.385 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSUpdateEngine(PrivateMethods) updateFinish] KSUpdateEngine update processing complete.", "process.name": "GoogleSoftwareUpdateAgent", @@ -565,7 +565,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.flags": [ "multiline" ], @@ -585,7 +585,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 13788, "message": "2016-12-13 
11:35:31.302 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentUploader fetcher:finishedWithData:] Successfully uploaded stats to { URL: https://tools.google.com/service/update2 }", "process.name": "GoogleSoftwareUpdateAgent", @@ -602,7 +602,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.flags": [ "multiline" ], @@ -622,7 +622,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14537, "message": "2016-12-13 11:35:32.508 GoogleSoftwareUpdateAgent[21412/0x700007399000] [lvl=2] -[KSAgentApp(KeystoneThread) runKeystonesInThreadWithArg:] Finished with engine thread", "process.name": "GoogleSoftwareUpdateAgent", @@ -639,7 +639,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14773, "message": "2016-12-13 11:35:32.825 GoogleSoftwareUpdateAgent[21412/0x7fffcc3f93c0] [lvl=2] -[KSAgentApp checkForUpdates] Finished update check.", "process.name": "GoogleSoftwareUpdateAgent", @@ -656,7 +656,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 14975, "message": "objc[85294]: __weak variable at 0x60000a8499d0 holds 0x2121212121212121 instead of 0x600006a22fa0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -673,7 +673,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15238, "message": "objc[85294]: __weak variable at 0x60800f047240 holds 0x2121212121212121 instead of 0x608002231220. 
This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -690,7 +690,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15501, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21498])", @@ -706,7 +706,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15716, "message": "objc[85294]: __weak variable at 0x60000a256990 holds 0x2121212121212121 instead of 0x600006a22420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -723,7 +723,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 15979, "message": "objc[85294]: __weak variable at 0x6080096475d0 holds 0x2121212121212121 instead of 0x608004e21280. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -740,7 +740,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16242, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -757,7 +757,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16312, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21556])", @@ -773,7 +773,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16527, "message": "Unknown key for integer: _DirtyJetsamMemoryLimit", "process.name": "com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent)", @@ -789,7 +789,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16689, "message": "objc[85294]: __weak variable at 0x60000a85a860 holds 0x2121212121212121 instead of 0x600004a3b9a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -806,7 +806,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 16952, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21581])", @@ -822,7 +822,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 17167, "message": "objc[85294]: __weak variable at 0x608009840580 holds 0x2121212121212121 instead of 0x608004a22940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -839,7 +839,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 17430, "message": "objc[85294]: __weak variable at 0x608009c5b700 holds 0x2121212121212121 instead of 0x608005830020. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -856,7 +856,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 17693, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21586])", @@ -872,7 +872,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 17908, "message": "objc[85294]: __weak variable at 0x60800ee592d0 holds 0x2121212121212121 instead of 0x608005627220. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -889,7 +889,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 18171, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -906,7 +906,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 18241, "message": "objc[85294]: __weak variable at 0x60000c648290 holds 0x2121212121212121 instead of 0x6000050242a0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -923,7 +923,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 18504, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21589])", @@ -939,7 +939,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 18719, "message": "objc[85294]: __weak variable at 0x600009840460 holds 0x2121212121212121 instead of 0x60000122e940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -956,7 +956,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 18982, "message": "Unknown key for integer: _DirtyJetsamMemoryLimit", "process.name": "com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent)", @@ -972,7 +972,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 19144, "message": "objc[85294]: __weak variable at 0x60000ee5b730 holds 0x2121212121212121 instead of 0x600007821c20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -989,7 +989,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 19407, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21946])", @@ -1005,7 +1005,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 19622, "message": "objc[85294]: __weak variable at 0x600006a49940 holds 0x2121212121212121 instead of 0x6000078202e0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1022,7 +1022,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 19885, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1039,7 +1039,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 19955, "message": "Invoked notification with id: d63743fb-f17b-4e9e-97d0-88e0e7304682", "process.name": "Slack Helper", @@ -1056,7 +1056,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 20078, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21966])", @@ -1072,7 +1072,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 20293, "message": "objc[85294]: __weak variable at 0x60800f043dc0 holds 0x2121212121212121 instead of 0x6080026228c0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1089,7 +1089,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 20556, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[21981])", @@ -1105,7 +1105,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 20771, "message": "objc[85294]: __weak variable at 0x608009a53600 holds 0x2121212121212121 instead of 0x608000629420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1122,7 +1122,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 21034, "message": "objc[85294]: __weak variable at 0x60800f259c30 holds 0x2121212121212121 instead of 0x608004a21c20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1139,7 +1139,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 21297, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1156,7 +1156,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 21367, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22226])", @@ -1172,7 +1172,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 21582, "message": "objc[85294]: __weak variable at 0x60000c647d80 holds 0x2121212121212121 instead of 0x600006e3ee80. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1189,7 +1189,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 21845, "message": "objc[85294]: __weak variable at 0x60800f053a80 holds 0x2121212121212121 instead of 0x608007227ce0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1206,7 +1206,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 22108, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22241])", @@ -1222,7 +1222,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 22323, "message": "objc[85294]: __weak variable at 0x60000a64ce80 holds 0x2121212121212121 instead of 0x600006629940. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1239,7 +1239,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 22586, "message": "objc[85294]: __weak variable at 0x60000a843580 holds 0x2121212121212121 instead of 0x600006629540. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1256,7 +1256,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 22849, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22254])", @@ -1272,7 +1272,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 23064, "message": "objc[85294]: __weak variable at 0x60800f45b910 holds 0x2121212121212121 instead of 0x608005822c40. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1289,7 +1289,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 23327, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1306,7 +1306,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 23397, "message": "Unknown key for integer: _DirtyJetsamMemoryLimit", "process.name": "com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent)", @@ -1322,7 +1322,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 23559, "message": "objc[85294]: __weak variable at 0x60000ea5edf0 holds 0x2121212121212121 instead of 0x600003a35a60. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1339,7 +1339,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 23822, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22265])", @@ -1355,7 +1355,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 24037, "message": "Invoked notification with id: 52bf37d9-0c4e-4276-8789-9fc7704bdf5b", "process.name": "Slack Helper", @@ -1372,7 +1372,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 24160, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22292])", @@ -1388,7 +1388,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 24375, "message": "Invoked notification with id: c6c7e356-60a7-4b9e-a9b1-ecc2b8ad09f2", "process.name": "Slack Helper", @@ -1405,7 +1405,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 24498, "message": "objc[85294]: __weak variable at 0x60800f246430 holds 0x2121212121212121 instead of 0x608001c26d00. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1422,7 +1422,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 24761, "message": "objc[85294]: __weak variable at 0x60800c85fd80 holds 0x2121212121212121 instead of 0x608005a3a420. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). 
Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1439,7 +1439,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 25024, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1456,7 +1456,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 25094, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22305])", @@ -1472,7 +1472,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 25309, "message": "objc[85294]: __weak variable at 0x600006452400 holds 0x2121212121212121 instead of 0x60000763bac0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1489,7 +1489,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 25572, "message": "2016-12-13 12:35:56.416 GoogleSoftwareUpdateAgent[22318/0x7fffcc3f93c0] [lvl=2] -[KSAgentApp setupLoggerOutput] Agent settings: ", "process.name": "GoogleSoftwareUpdateAgent", @@ -1506,7 +1506,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 26456, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22324])", @@ -1522,7 +1522,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 26671, "message": "objc[85294]: __weak variable at 0x60800f24d0f0 holds 0x2121212121212121 instead of 0x608007423ee0. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1539,7 +1539,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 26934, "message": "Invoked notification with id: aa608788-d049-4d1a-9112-521c71702371", "process.name": "Slack Helper", @@ -1556,7 +1556,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 27057, "message": "Unknown key for integer: _DirtyJetsamMemoryLimit", "process.name": "com.apple.xpc.launchd[1] (com.apple.imfoundation.IMRemoteURLConnectionAgent)", @@ -1572,7 +1572,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 27219, "message": "Invoked notification with id: d75f9ec1-a8fd-41c2-a45e-6df2952f0702", "process.name": "Slack Helper", @@ -1589,7 +1589,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 27342, "message": "Endpoint has been activated through legacy launch(3) APIs. 
Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22336])", @@ -1605,7 +1605,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 27557, "message": "objc[85294]: __weak variable at 0x60800a2535a0 holds 0x2121212121212121 instead of 0x608003828e20. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1622,7 +1622,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 27820, "message": "ASL Sender Statistics", "process.name": "syslogd", @@ -1639,7 +1639,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 27890, "message": "objc[85294]: __weak variable at 0x60800f241d50 holds 0x2121212121212121 instead of 0x60800562f380. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1656,7 +1656,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 28153, "message": "Endpoint has been activated through legacy launch(3) APIs. Please switch to XPC or bootstrap_check_in(): com.apple.quicklook", "process.name": "com.apple.xpc.launchd[1] (com.apple.quicklook[22348])", @@ -1672,7 +1672,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 28368, "message": "objc[85294]: __weak variable at 0x60000c444450 holds 0x2121212121212121 instead of 0x600007237f00. 
This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", @@ -1689,7 +1689,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", - "input.type": "system-logs", + "input.type": "log", "log.offset": 28631, "message": "objc[85294]: __weak variable at 0x60000c4424a0 holds 0x2121212121212121 instead of 0x600007026520. This is probably incorrect use of objc_storeWeak() and objc_loadWeak(). Break on objc_weak_error to debug.", "process.name": "Google Chrome", diff --git a/filebeat/module/system/syslog/test/debian-12.journal-expected.json b/filebeat/module/system/syslog/test/debian-12.journal-expected.json index aebf596762cc..3e9b606be267 100644 --- a/filebeat/module/system/syslog/test/debian-12.journal-expected.json +++ b/filebeat/module/system/syslog/test/debian-12.journal-expected.json @@ -7,7 +7,7 @@ "fileset.name": "syslog", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 3, "log.syslog.priority": 6, "message": "Stopped target getty.target - Login Prompts.", @@ -32,7 +32,7 @@ "fileset.name": "syslog", "host.hostname": "vagrant-debian-12", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 0, "log.syslog.priority": 6, "message": "Console: switching to colour frame buffer device 160x50", @@ -50,7 +50,7 @@ "fileset.name": "syslog", "host.hostname": "bookworm", "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "system-logs", + "input.type": "journald", "log.syslog.facility.code": 0, "log.syslog.priority": 6, "message": "thermal_sys: Registered thermal governor 'power_allocator'", diff --git a/filebeat/module/system/syslog/test/suse-syslog.log-expected.json b/filebeat/module/system/syslog/test/suse-syslog.log-expected.json index 
c07c51851def..4090efed2e73 100644 --- a/filebeat/module/system/syslog/test/suse-syslog.log-expected.json +++ b/filebeat/module/system/syslog/test/suse-syslog.log-expected.json @@ -6,7 +6,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "linux-sqrz", - "input.type": "system-logs", + "input.type": "log", "log.offset": 0, "message": "Stopped target Basic System.", "process.name": "systemd", @@ -23,7 +23,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "linux-sqrz", - "input.type": "system-logs", + "input.type": "log", "log.offset": 88, "message": "Stopped target Paths.", "process.name": "systemd", diff --git a/filebeat/module/system/syslog/test/tz-offset.log-expected.json b/filebeat/module/system/syslog/test/tz-offset.log-expected.json index eacba0d40acc..905d8cfd95d9 100644 --- a/filebeat/module/system/syslog/test/tz-offset.log-expected.json +++ b/filebeat/module/system/syslog/test/tz-offset.log-expected.json @@ -7,7 +7,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "rmbkmonitor04", - "input.type": "system-logs", + "input.type": "log", "log.file.path": "tz-offset.log", "log.offset": 0, "message": "shutting down for system halt", @@ -26,7 +26,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "rmbkmonitor04", - "input.type": "system-logs", + "input.type": "log", "log.file.path": "tz-offset.log", "log.offset": 89, "message": "constraint_0_power_limit_uw exceeded.", @@ -44,7 +44,7 @@ "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "localhost", - "input.type": "system-logs", + "input.type": "log", "log.file.path": "tz-offset.log", "log.offset": 184, "message": "pam_unix(sudo-i:session): session opened for user root by userauth3(uid=0)", From c1fc9a974d7ab9acb734544df7e13022c9db7ba6 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 16 Oct 2024 10:48:57 -0400 Subject: [PATCH 43/90] Fix error handling and add nolint comment (#41256) 
--------- Co-authored-by: Denis --- filebeat/input/systemlogs/input.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/filebeat/input/systemlogs/input.go b/filebeat/input/systemlogs/input.go index 05cd541d81c1..7badfda760cf 100644 --- a/filebeat/input/systemlogs/input.go +++ b/filebeat/input/systemlogs/input.go @@ -119,7 +119,7 @@ func useJournald(c *conf.C) (bool, error) { cfg := config{} if err := c.Unpack(&cfg); err != nil { - return false, nil + return false, fmt.Errorf("cannot unpack 'system-logs' config: %w", err) } if cfg.UseJournald { @@ -159,7 +159,7 @@ func useJournald(c *conf.C) (bool, error) { return true, nil } -func toJournaldConfig(cfg *conf.C) (*conf.C, error) { +func toJournaldConfig(cfg *conf.C) (*conf.C, error) { //nolint:unused // It's used on Linux newCfg, err := cfg.Child("journald", -1) if err != nil { return nil, fmt.Errorf("cannot extract 'journald' block: %w", err) From ac81fbd70a9682b46626a66e0b45036a82204202 Mon Sep 17 00:00:00 2001 From: Nicholas Berlin <56366649+nicholasberlin@users.noreply.github.com> Date: Wed, 16 Oct 2024 14:17:44 -0400 Subject: [PATCH 44/90] [Auditbeat] Use a separate netlink socket for control to avoid data congestion. 
(#41207) --- CHANGELOG.next.asciidoc | 2 +- auditbeat/module/auditd/audit_linux.go | 16 +++++-- auditbeat/module/auditd/audit_linux_test.go | 46 +++++++++++++++----- auditbeat/module/auditd/golden_files_test.go | 13 ++++-- 4 files changed, 58 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 72ff8083fea3..0ac51f5990c5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -122,7 +122,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Auditbeat* - +- Request status from a separate socket to avoid data congestion {pull}41207[41207] *Filebeat* diff --git a/auditbeat/module/auditd/audit_linux.go b/auditbeat/module/auditd/audit_linux.go index f627c0cbefd6..97f755ca4139 100644 --- a/auditbeat/module/auditd/audit_linux.go +++ b/auditbeat/module/auditd/audit_linux.go @@ -87,6 +87,7 @@ func init() { type MetricSet struct { mb.BaseMetricSet config Config + control *libaudit.AuditClient client *libaudit.AuditClient log *logp.Logger kernelLost struct { @@ -107,9 +108,14 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { _, _, kernel, _ := kernelVersion() log.Infof("auditd module is running as euid=%v on kernel=%v", os.Geteuid(), kernel) + control, err := libaudit.NewAuditClient(nil) + if err != nil { + return nil, fmt.Errorf("failed to create audit control client: %w", err) + } + client, err := newAuditClient(&config, log) if err != nil { - return nil, fmt.Errorf("failed to create audit client: %w", err) + return nil, fmt.Errorf("failed to create audit data client: %w", err) } reassemblerGapsMetric.Set(0) @@ -119,6 +125,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, + control: control, client: client, config: config, log: log, @@ -168,10 +175,13 @@ func closeAuditClient(client *libaudit.AuditClient, log *logp.Logger) { // kernel until the reporter's done channel is closed. 
func (ms *MetricSet) Run(reporter mb.PushReporterV2) { defer closeAuditClient(ms.client, ms.log) + defer ms.control.Close() // Don't attempt to change configuration if audit rules are locked (enabled == 2). - // Will result in EPERM. - status, err := ms.client.GetStatus() + // Will result in EPERM. Also, ensure that another socket is used to determine the + // status, because audit data can already buffering for ms.client. Which can lead + // to an ENOBUFS error bubbling up. + status, err := ms.control.GetStatus() if err != nil { err = fmt.Errorf("failed to get audit status before adding rules: %w", err) reporter.Error(err) diff --git a/auditbeat/module/auditd/audit_linux_test.go b/auditbeat/module/auditd/audit_linux_test.go index 9f9950d1050e..f0358b7a71c5 100644 --- a/auditbeat/module/auditd/audit_linux_test.go +++ b/auditbeat/module/auditd/audit_linux_test.go @@ -75,10 +75,12 @@ var ( func TestImmutable(t *testing.T) { logp.TestingSetup() - // Create a mock netlink client that provides the expected responses. - mock := NewMock(). + // Create mocks of netlink client and control that provide the expected responses. + controlMock := NewMock(). // Get Status response for initClient - returnACK().returnStatus(). + returnACK().returnStatus() + + mock := NewMock(). // Send expected ACKs for initialization // With one extra for SetImmutable returnACK().returnStatus().returnACK().returnACK(). 
@@ -91,7 +93,13 @@ func TestImmutable(t *testing.T) { config["immutable"] = true ms := mbtest.NewPushMetricSetV2WithRegistry(t, config, ab.Registry) - auditMetricSet := ms.(*MetricSet) + auditMetricSet, ok := ms.(*MetricSet) + if !ok { + t.Fatalf("Expected *MetricSet but got %T", ms) + } + + auditMetricSet.control.Close() + auditMetricSet.control = &libaudit.AuditClient{Netlink: controlMock} auditMetricSet.client.Close() auditMetricSet.client = &libaudit.AuditClient{Netlink: mock} @@ -110,10 +118,12 @@ func TestImmutable(t *testing.T) { func TestData(t *testing.T) { logp.TestingSetup() - // Create a mock netlink client that provides the expected responses. - mock := NewMock(). + // Create mocks of netlink client and control that provide the expected responses. + controlMock := NewMock(). // Get Status response for initClient - returnACK().returnStatus(). + returnACK().returnStatus() + + mock := NewMock(). // Send expected ACKs for initialization returnACK().returnStatus().returnACK().returnACK(). returnACK().returnACK().returnACK(). @@ -124,7 +134,12 @@ func TestData(t *testing.T) { // Replace the default AuditClient with a mock. ms := mbtest.NewPushMetricSetV2WithRegistry(t, getConfig(), ab.Registry) - auditMetricSet := ms.(*MetricSet) + auditMetricSet, ok := ms.(*MetricSet) + if !ok { + t.Fatalf("Expected *MetricSet but got %T", ms) + } + auditMetricSet.control.Close() + auditMetricSet.control = &libaudit.AuditClient{Netlink: controlMock} auditMetricSet.client.Close() auditMetricSet.client = &libaudit.AuditClient{Netlink: mock} @@ -143,10 +158,12 @@ func TestData(t *testing.T) { func TestLoginType(t *testing.T) { logp.TestingSetup() - // Create a mock netlink client that provides the expected responses. - mock := NewMock(). + // Create mocks of netlink client and control that provide the expected responses. + controlMock := NewMock(). // Get Status response for initClient - returnACK().returnStatus(). + returnACK().returnStatus() + + mock := NewMock(). 
// Send expected ACKs for initialization returnACK().returnStatus().returnACK().returnACK(). returnACK().returnACK().returnACK(). @@ -157,7 +174,12 @@ func TestLoginType(t *testing.T) { // Replace the default AuditClient with a mock. ms := mbtest.NewPushMetricSetV2WithRegistry(t, getConfig(), ab.Registry) - auditMetricSet := ms.(*MetricSet) + auditMetricSet, ok := ms.(*MetricSet) + if !ok { + t.Fatalf("Expected *MetricSet but got %T", ms) + } + auditMetricSet.control.Close() + auditMetricSet.control = &libaudit.AuditClient{Netlink: controlMock} auditMetricSet.client.Close() auditMetricSet.client = &libaudit.AuditClient{Netlink: mock} diff --git a/auditbeat/module/auditd/golden_files_test.go b/auditbeat/module/auditd/golden_files_test.go index 096d53d1b903..a121b9371dc6 100644 --- a/auditbeat/module/auditd/golden_files_test.go +++ b/auditbeat/module/auditd/golden_files_test.go @@ -191,9 +191,11 @@ func TestGoldenFiles(t *testing.T) { if err != nil { t.Fatalf("error reading log file '%s': %v", file, err) } - mock := NewMock(). + // Create mocks of netlink client and control that provide the expected responses. + controlMock := NewMock(). // Get Status response for initClient - returnACK().returnStatus(). + returnACK().returnStatus() + mock := NewMock(). // Send expected ACKs for initialization returnACK().returnStatus().returnACK().returnACK(). returnACK().returnACK().returnACK(). 
@@ -203,7 +205,12 @@ func TestGoldenFiles(t *testing.T) { returnMessage(terminator) ms := mbtest.NewPushMetricSetV2WithRegistry(t, configForGolden(), ab.Registry) - auditMetricSet := ms.(*MetricSet) + auditMetricSet, ok := ms.(*MetricSet) + if !ok { + t.Fatalf("Expected *MetricSet but got %T", ms) + } + auditMetricSet.control.Close() + auditMetricSet.control = &libaudit.AuditClient{Netlink: controlMock} auditMetricSet.client.Close() auditMetricSet.client = &libaudit.AuditClient{Netlink: mock} mbEvents := runTerminableReporter(fileTimeout, ms, isTestEvent) From 9873d15a4d22a090f69f3afa203c0218b9994c56 Mon Sep 17 00:00:00 2001 From: Denis Date: Wed, 16 Oct 2024 22:47:19 +0200 Subject: [PATCH 45/90] =?UTF-8?q?Revert=20"build(deps):=20bump=20the=20gcp?= =?UTF-8?q?-sdks=20group=20across=201=20directory=20with=2010=20updat?= =?UTF-8?q?=E2=80=A6"=20(#41269)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 89ed20d5ea412ae913fcff6730d3d1304410a990. Due to ``` /usr/lib/gcc-cross/aarch64-linux-gnu/6/../../../../aarch64-linux-gnu/bin/ld.gold: internal error in maybe_apply_stub, at ../../gold/aarch64.cc:5407 collect2: error: ld returned 1 exit status ``` This building error occurred after the dependency update which this change reverts. 
--- NOTICE.txt | 7659 ++++++++++++++++------------------------------------ go.mod | 55 +- go.sum | 122 +- 3 files changed, 2395 insertions(+), 5441 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 74fdd66fd1f1..b5df79133f70 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -11,11 +11,11 @@ Third party libraries used by the Elastic Beats project: -------------------------------------------------------------------------------- Dependency : cloud.google.com/go -Version: v0.115.1 +Version: v0.115.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.115.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.115.0/LICENSE: Apache License @@ -223,11 +223,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.115.1/LICEN -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/bigquery -Version: v1.63.1 +Version: v1.62.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.63.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.62.0/LICENSE: Apache License @@ -435,11 +435,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.63 -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/compute -Version: v1.28.0 +Version: v1.27.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.28.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.27.4/LICENSE: Apache License @@ -647,11 +647,11 @@ 
Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.28. -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/monitoring -Version: v1.21.0 +Version: v1.20.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1.21.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1.20.4/LICENSE: Apache License @@ -859,11 +859,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1. -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/pubsub -Version: v1.42.0 +Version: v1.41.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.42.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.41.0/LICENSE: Apache License @@ -1071,11 +1071,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.42.0 -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/redis -Version: v1.17.0 +Version: v1.16.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/redis@v1.17.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/redis@v1.16.4/LICENSE: Apache License @@ -1283,11 +1283,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/redis@v1.17.0/ -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/storage -Version: v1.44.0 +Version: v1.43.0 Licence type 
(autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/storage@v1.44.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/storage@v1.43.0/LICENSE: Apache License @@ -25472,11 +25472,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/oauth2 -Version: v0.23.0 +Version: v0.22.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.23.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/oauth2@v0.22.0/LICENSE: Copyright 2009 The Go Authors. @@ -25694,11 +25694,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : google.golang.org/api -Version: v0.197.0 +Version: v0.191.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/api@v0.197.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/api@v0.191.0/LICENSE: Copyright (c) 2011 Google Inc. All rights reserved. @@ -25731,11 +25731,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : google.golang.org/genproto/googleapis/api -Version: v0.0.0-20240903143218-8af14fe29dc1 +Version: v0.0.0-20240725223205-93522f1f2a9f Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/api@v0.0.0-20240903143218-8af14fe29dc1/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/api@v0.0.0-20240725223205-93522f1f2a9f/LICENSE: Apache License @@ -25943,11 +25943,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googlea -------------------------------------------------------------------------------- Dependency : google.golang.org/grpc -Version: v1.66.2 +Version: v1.66.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.66.2/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.66.0/LICENSE: Apache License @@ -28040,12 +28040,12 @@ THE SOFTWARE. 
-------------------------------------------------------------------------------- -Dependency : cel.dev/expr -Version: v0.16.1 +Dependency : cloud.google.com/go/auth +Version: v0.8.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cel.dev/expr@v0.16.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth@v0.8.0/LICENSE: Apache License @@ -28252,12 +28252,12 @@ Contents of probable licence file $GOMODCACHE/cel.dev/expr@v0.16.1/LICENSE: -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/auth -Version: v0.9.3 +Dependency : cloud.google.com/go/auth/oauth2adapt +Version: v0.2.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth@v0.9.3/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth/oauth2adapt@v0.2.4/LICENSE: Apache License @@ -28464,12 +28464,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth@v0.9.3/LI -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/auth/oauth2adapt -Version: v0.2.4 +Dependency : cloud.google.com/go/compute/metadata +Version: v0.5.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth/oauth2adapt@v0.2.4/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metadata@v0.5.0/LICENSE: Apache License @@ -28676,12 +28676,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/auth/oauth2ada -------------------------------------------------------------------------------- -Dependency : 
cloud.google.com/go/compute/metadata -Version: v0.5.2 +Dependency : cloud.google.com/go/datacatalog +Version: v1.20.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metadata@v0.5.2/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1.20.5/LICENSE: Apache License @@ -28888,12 +28888,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metada -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/datacatalog -Version: v1.22.1 +Dependency : cloud.google.com/go/iam +Version: v1.1.12 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1.22.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.1.12/LICENSE: Apache License @@ -29100,12 +29100,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1 -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/iam -Version: v1.2.1 +Dependency : cloud.google.com/go/kms +Version: v1.18.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.2.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/kms@v1.18.4/LICENSE: Apache License @@ -29312,12 +29312,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.2.1/LIC -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/kms -Version: v1.19.0 +Dependency : cloud.google.com/go/longrunning +Version: v0.5.11 
Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/kms@v1.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/longrunning@v0.5.11/LICENSE: Apache License @@ -29524,13 +29524,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/kms@v1.19.0/LI -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/logging -Version: v1.11.0 +Dependency : code.cloudfoundry.org/go-diodes +Version: v0.0.0-20190809170250-f77fb823c7ee Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/logging@v1.11.0/LICENSE: - +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0.0.0-20190809170250-f77fb823c7ee/LICENSE: Apache License Version 2.0, January 2004 @@ -29734,14 +29733,13 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/logging@v1.11. See the License for the specific language governing permissions and limitations under the License. 
- -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/longrunning -Version: v0.6.1 +Dependency : code.cloudfoundry.org/gofileutils +Version: v0.0.0-20170111115228-4d0c80011a0f Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/longrunning@v0.6.1/LICENSE: +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/gofileutils@v0.0.0-20170111115228-4d0c80011a0f/LICENSE: Apache License @@ -29948,681 +29946,47 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/longrunning@v0 -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/trace -Version: v1.11.0 -Licence type (autodetected): Apache-2.0 +Dependency : code.cloudfoundry.org/rfc5424 +Version: v0.0.0-20180905210152-236a6d29298a +Licence type (autodetected): BSD-2-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/trace@v1.11.0/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/rfc5424@v0.0.0-20180905210152-236a6d29298a/LICENSE: - APPENDIX: How to apply the Apache License to your work. +BSD 2-Clause License - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +Copyright (c) 2016, Ross Kinder +All rights reserved. - Copyright [yyyy] [name of copyright owner] +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. - http://www.apache.org/licenses/LICENSE-2.0 +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : code.cloudfoundry.org/go-diodes -Version: v0.0.0-20190809170250-f77fb823c7ee +Dependency : github.com/AdaLogics/go-fuzz-headers +Version: v0.0.0-20230811130428-ced1acdcaa24 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0.0.0-20190809170250-f77fb823c7ee/LICENSE: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- -Dependency : code.cloudfoundry.org/gofileutils -Version: v0.0.0-20170111115228-4d0c80011a0f -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/gofileutils@v0.0.0-20170111115228-4d0c80011a0f/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : code.cloudfoundry.org/rfc5424 -Version: v0.0.0-20180905210152-236a6d29298a -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/rfc5424@v0.0.0-20180905210152-236a6d29298a/LICENSE: - -BSD 2-Clause License - -Copyright (c) 2016, Ross Kinder -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/AdaLogics/go-fuzz-headers -Version: v0.0.0-20230811130428-ced1acdcaa24 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/!ada!logics/go-fuzz-headers@v0.0.0-20230811130428-ced1acdcaa24/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/!ada!logics/go-fuzz-headers@v0.0.0-20230811130428-ced1acdcaa24/LICENSE: Apache License Version 2.0, January 2004 @@ -32774,224 +32138,216 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure!a!d/microsoft-au -------------------------------------------------------------------------------- -Dependency : github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp -Version: v1.24.1 -Licence type (autodetected): Apache-2.0 +Dependency : github.com/JohnCGriffin/overflow +Version: v0.0.0-20211019200055-46fa312c352c +Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/opentelemetry-operations-go/detectors/gcp@v1.24.1/LICENSE: +No licence file provided. 
+-------------------------------------------------------------------------------- +Dependency : github.com/Masterminds/semver +Version: v1.5.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Contents of probable licence file $GOMODCACHE/github.com/!masterminds/semver@v1.5.0/LICENSE.txt: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +Copyright (C) 2014-2019, Matt Butcher and Matt Farina - 1. Definitions. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
- "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +-------------------------------------------------------------------------------- +Dependency : github.com/Shopify/toxiproxy +Version: v2.1.4+incompatible +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +Contents of probable licence file $GOMODCACHE/github.com/!shopify/toxiproxy@v2.1.4+incompatible/LICENSE: - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +The MIT License (MIT) - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +Copyright (c) 2014 Shopify - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +-------------------------------------------------------------------------------- +Dependency : github.com/akavel/rsrc +Version: v0.8.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +Contents of probable licence file $GOMODCACHE/github.com/akavel/rsrc@v0.8.0/LICENSE.txt: - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and +The MIT License (MIT) + +Copyright (c) 2013-2017 The rsrc Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
- (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +-------------------------------------------------------------------------------- +Dependency : github.com/alexbrainman/sspi +Version: v0.0.0-20210105120005-909beea2cc74 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. 
+Contents of probable licence file $GOMODCACHE/github.com/alexbrainman/sspi@v0.0.0-20210105120005-909beea2cc74/LICENSE: - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +Copyright (c) 2012 The Go Authors. All rights reserved. - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - 9. Accepting Warranty or Additional Liability. 
While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS +-------------------------------------------------------------------------------- +Dependency : github.com/andybalholm/brotli +Version: v1.0.5 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - APPENDIX: How to apply the Apache License to your work. +Contents of probable licence file $GOMODCACHE/github.com/andybalholm/brotli@v1.0.5/LICENSE: - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. 
- Copyright [yyyy] [name of copyright owner] +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. - http://www.apache.org/licenses/LICENSE-2.0 +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
+ +-------------------------------------------------------------------------------- +Dependency : github.com/antlr4-go/antlr/v4 +Version: v4.13.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/antlr4-go/antlr/v4@v4.13.0/LICENSE: + +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- -Dependency : github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric -Version: v0.48.1 +Dependency : github.com/apache/arrow/go/v15 +Version: v15.0.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/opentelemetry-operations-go/exporter/metric@v0.48.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/apache/arrow/go/v15@v15.0.2/LICENSE.txt: Apache License @@ -33196,448 +32552,339 @@ Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/ See the License for the specific language governing permissions and limitations under the License. - --------------------------------------------------------------------------------- -Dependency : github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock -Version: v0.48.1 -Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/opentelemetry-operations-go/internal/cloudmock@v0.48.1/LICENSE: +src/arrow/util (some portions): Apache 2.0, and 3-clause BSD +Some portions of this module are derived from code in the Chromium project, +copyright (c) Google inc and (c) The Chromium Authors and licensed under the +Apache 2.0 License or the under the 3-clause BSD license: - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ + Copyright (c) 2013 The Chromium Authors. All rights reserved. - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: - 1. Definitions. 
+ * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +-------------------------------------------------------------------------------- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +This project includes code from Daniel Lemire's FrameOfReference project. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/FrameOfReference +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +-------------------------------------------------------------------------------- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +This project includes code from the TensorFlow project - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +Copyright 2015 The TensorFlow Authors. All Rights Reserved. - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. + http://www.apache.org/licenses/LICENSE-2.0 - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. 
+Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +-------------------------------------------------------------------------------- - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +This project includes code from the NumPy project. 
- (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and +https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +-------------------------------------------------------------------------------- - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +This project includes code from the Boost project - END OF TERMS AND CONDITIONS +Boost Software License - Version 1.0 - August 17th, 2003 - APPENDIX: How to apply the Apache License to your work. 
+Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. - Copyright [yyyy] [name of copyright owner] +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +-------------------------------------------------------------------------------- - http://www.apache.org/licenses/LICENSE-2.0 +This project includes code from the FlatBuffers project - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Copyright 2014 Google Inc. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at --------------------------------------------------------------------------------- -Dependency : github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping -Version: v0.48.1 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- + http://www.apache.org/licenses/LICENSE-2.0 -Contents of probable licence file $GOMODCACHE/github.com/!google!cloud!platform/opentelemetry-operations-go/internal/resourcemapping@v0.48.1/LICENSE: +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +-------------------------------------------------------------------------------- - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +This project includes code from the tslib project - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +Copyright 2015 Microsoft Corporation. All rights reserved. - 1. Definitions. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. + http://www.apache.org/licenses/LICENSE-2.0 - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +-------------------------------------------------------------------------------- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +This project includes code from the jemalloc project - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +https://github.com/jemalloc/jemalloc - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +Copyright (C) 2002-2017 Jason Evans . 
+All rights reserved. +Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +This project includes code from the Go project, BSD 3-clause license + PATENTS +weak patent termination clause +(https://github.com/golang/go/blob/master/PATENTS). - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +Copyright (c) 2009 The Go Authors. All rights reserved. - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +-------------------------------------------------------------------------------- - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and +This project includes code from the hs2client - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +https://github.com/cloudera/hs2client - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +Copyright 2016 Cloudera Inc. 
- You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +http://www.apache.org/licenses/LICENSE-2.0 - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +-------------------------------------------------------------------------------- - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +The script ci/scripts/util_wait_for_it.sh has the following license - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. 
However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +Copyright (c) 2016 Giles Hall - END OF TERMS AND CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: - APPENDIX: How to apply the Apache License to your work. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - Copyright [yyyy] [name of copyright owner] +-------------------------------------------------------------------------------- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +The script r/configure has the following license (MIT) - http://www.apache.org/licenses/LICENSE-2.0 +Copyright (c) 2017, Jeroen Ooms and Jim Hester - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/JohnCGriffin/overflow -Version: v0.0.0-20211019200055-46fa312c352c -Licence type (autodetected): MIT -------------------------------------------------------------------------------- -No licence file provided. +cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and +cpp/src/arrow/util/logging-test.cc are adapted from +Ray Project (https://github.com/ray-project/ray) (Apache 2.0). --------------------------------------------------------------------------------- -Dependency : github.com/Masterminds/semver -Version: v1.5.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- +Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray) -Contents of probable licence file $GOMODCACHE/github.com/!masterminds/semver@v1.5.0/LICENSE.txt: +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -Copyright (C) 2014-2019, Matt Butcher and Matt Farina + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +-------------------------------------------------------------------------------- +The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h, +cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h, +cpp/src/arrow/vendored/datetime/ios.mm, +cpp/src/arrow/vendored/datetime/tz.cpp are adapted from +Howard Hinnant's date library (https://github.com/HowardHinnant/date) +It is licensed under MIT license. + +The MIT License (MIT) +Copyright (c) 2015, 2016, 2017 Howard Hinnant +Copyright (c) 2016 Adrian Colomitchi +Copyright (c) 2017 Florian Dang +Copyright (c) 2017 Paul Thompson +Copyright (c) 2018 Tomasz Kamiński Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -33646,29 +32893,24 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
--------------------------------------------------------------------------------- -Dependency : github.com/Shopify/toxiproxy -Version: v2.1.4+incompatible -Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!shopify/toxiproxy@v2.1.4+incompatible/LICENSE: - -The MIT License (MIT) +The file cpp/src/arrow/util/utf8.h includes code adapted from the page + https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +with the following license (MIT) -Copyright (c) 2014 Shopify +Copyright (c) 2008-2009 Bjoern Hoehrmann Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -33688,62 +32930,87 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +The file cpp/src/arrow/vendored/string_view.hpp has the following license --------------------------------------------------------------------------------- -Dependency : github.com/akavel/rsrc -Version: v0.8.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- +Boost Software License - Version 1.0 - August 17th, 2003 -Contents of probable licence file $GOMODCACHE/github.com/akavel/rsrc@v0.8.0/LICENSE.txt: +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: -The MIT License (MIT) - -Copyright (c) 2013-2017 The 
rsrc Authors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------------------------------- -Dependency : github.com/alexbrainman/sspi -Version: v0.0.0-20210105120005-909beea2cc74 -Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/alexbrainman/sspi@v0.0.0-20210105120005-909beea2cc74/LICENSE: +The files in cpp/src/arrow/vendored/xxhash/ have the following license +(BSD 2-Clause License) -Copyright (c) 2012 The Go Authors. All rights reserved. +xxHash Library +Copyright (c) 2012-2014, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +You can contact the author at : +- xxHash homepage: http://www.xxhash.com +- xxHash source repository : https://github.com/Cyan4973/xxHash + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/double-conversion/ have the following license +(BSD 3-Clause License) +Copyright 2006-2011, the V8 project authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -33757,376 +33024,283 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------------------- -Dependency : github.com/andybalholm/brotli -Version: v1.0.5 -Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/andybalholm/brotli@v1.0.5/LICENSE: +The files in cpp/src/arrow/vendored/uriparser/ have the following license +(BSD 3-Clause License) -Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. +uriparser - RFC 3986 URI parsing library -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Copyright (C) 2007, Weijia Song +Copyright (C) 2007, Sebastian Pipping +All rights reserved. -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. --------------------------------------------------------------------------------- -Dependency : github.com/antlr4-go/antlr/v4 -Version: v4.13.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- + * Neither the name of the nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. -Contents of probable licence file $GOMODCACHE/github.com/antlr4-go/antlr/v4@v4.13.0/LICENSE: +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. -Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. +-------------------------------------------------------------------------------- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: +The files under dev/tasks/conda-recipes have the following license -1. 
Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. +BSD 3-clause license +Copyright (c) 2015-2018, conda-forge +All rights reserved. -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: -3. Neither name of copyright holders nor the names of its contributors -may be used to endorse or promote products derived from this software -without specific prior written permission. +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +3. 
Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : github.com/apache/arrow/go/v15 -Version: v15.0.2 -Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/apache/arrow/go/v15@v15.0.2/LICENSE.txt: +The files in cpp/src/arrow/vendored/utf8cpp/ have the following license +Copyright 2006 Nemanja Trifunovic - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +The 
copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. - 1. Definitions. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +-------------------------------------------------------------------------------- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +This project includes code from Apache Kudu. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. + * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
+Copyright: 2016 The Apache Software Foundation. +Home page: https://kudu.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +-------------------------------------------------------------------------------- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +This project includes code from Apache Impala (incubating), formerly +Impala. The Impala code and rights were donated to the ASF as part of the +Incubator process after the initial code imports into Apache Parquet. - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +Copyright: 2012 Cloudera, Inc. +Copyright: 2016 The Apache Software Foundation. +Home page: http://impala.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
+-------------------------------------------------------------------------------- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +This project includes code from Apache Aurora. - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +* dev/release/{release,changelog,release-candidate} are based on the scripts from + Apache Aurora - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +Copyright: 2016 The Apache Software Foundation. +Home page: https://aurora.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +-------------------------------------------------------------------------------- - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +This project includes code from the Google styleguide. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and +Copyright: 2009 Google Inc. All rights reserved. 
+Homepage: https://github.com/google/styleguide +License: 3-clause BSD - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +-------------------------------------------------------------------------------- - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +This project includes code from Snappy. - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code + from Google's Snappy project. - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/snappy +License: 3-clause BSD - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +-------------------------------------------------------------------------------- - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +This project includes code from the manylinux project. - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, + requirements.txt} are based on code from the manylinux project. - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +Copyright: 2016 manylinux +Homepage: https://github.com/pypa/manylinux +License: The MIT License (MIT) - END OF TERMS AND CONDITIONS +-------------------------------------------------------------------------------- - APPENDIX: How to apply the Apache License to your work. 
+This project includes code from the cymove project: - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +* python/pyarrow/includes/common.pxd includes code from the cymove project - Copyright [yyyy] [name of copyright owner] +The MIT License (MIT) +Copyright (c) 2019 Omer Ozarslan - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - http://www.apache.org/licenses/LICENSE-2.0 +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- -src/arrow/util (some portions): Apache 2.0, and 3-clause BSD +The projects includes code from the Ursabot project under the dev/archery +directory. -Some portions of this module are derived from code in the Chromium project, -copyright (c) Google inc and (c) The Chromium Authors and licensed under the -Apache 2.0 License or the under the 3-clause BSD license: +License: BSD 2-Clause - Copyright (c) 2013 The Chromium Authors. All rights reserved. +Copyright 2019 RStudio, Inc. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -This project includes code from Daniel Lemire's FrameOfReference project. +This project include code from CMake. 
-https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp +* cpp/cmake_modules/FindGTest.cmake is based on code from CMake. -Copyright: 2013 Daniel Lemire -Home page: http://lemire.me/en/ -Project page: https://github.com/lemire/FrameOfReference -License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 +Copyright: Copyright 2000-2019 Kitware, Inc. and Contributors +Homepage: https://gitlab.kitware.com/cmake/cmake +License: 3-clause BSD -------------------------------------------------------------------------------- -This project includes code from the TensorFlow project +This project include code from mingw-w64. -Copyright 2015 The TensorFlow Authors. All Rights Reserved. +* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +Copyright (c) 2009 - 2013 by the mingw-w64 project +Homepage: https://mingw-w64.org +License: Zope Public License (ZPL) Version 2.1. - http://www.apache.org/licenses/LICENSE-2.0 +--------------------------------------------------------------------------------- -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +This project include code from Google's Asylo project. --------------------------------------------------------------------------------- +* cpp/src/arrow/result.h is based on status_or.h -This project includes code from the NumPy project. 
+Copyright (c) Copyright 2017 Asylo authors +Homepage: https://asylo.dev/ +License: Apache 2.0 -https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 +-------------------------------------------------------------------------------- -https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c +This project includes code from Google's protobuf project -Copyright (c) 2005-2017, NumPy Developers. -All rights reserved. +* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are +Copyright 2008 Google Inc. All rights reserved. +Homepage: https://developers.google.com/protocol-buffers/ +License: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - +notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -34140,39 +33314,92 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + -------------------------------------------------------------------------------- -This project includes code from the Boost project +3rdparty dependency LLVM is statically linked in certain binary distributions. +Additionally some sections of source code have been derived from sources in LLVM +and have been clearly labeled as such. LLVM has the following license: -Boost Software License - Version 1.0 - August 17th, 2003 +============================================================================== +LLVM Release License +============================================================================== +University of Illinois/NCSA +Open Source License -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: +Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign. +All rights reserved. 
-The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. + +============================================================================== +Copyrights and Licenses for Third Party Software Distributed with LLVM: +============================================================================== +The LLVM software contains code written by third parties. Such software will +have its own individual LICENSE.TXT file in the directory in which it appears. +This file will describe the copyrights, license, and restrictions which apply +to that code. + +The disclaimer of warranty in the University of Illinois Open Source License +applies to all code in the LLVM Distribution, and nothing in any of the +other licenses gives permission to use the names of the LLVM Team or the +University of Illinois to endorse or promote products derived from this +Software. 
+ +The following pieces of software have additional or alternate copyrights, +licenses, and/or restrictions: + +Program Directory +------- --------- +Google Test llvm/utils/unittest/googletest +OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex} +pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT} +ARM contributions llvm/lib/Target/ARM/LICENSE.TXT +md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h -------------------------------------------------------------------------------- -This project includes code from the FlatBuffers project +3rdparty dependency gRPC is statically linked in certain binary +distributions, like the python wheels. gRPC has the following license: -Copyright 2014 Google Inc. +Copyright 2014 gRPC authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -34188,15 +33415,20 @@ limitations under the License. -------------------------------------------------------------------------------- -This project includes code from the tslib project +3rdparty dependency Apache Thrift is statically linked in certain binary +distributions, like the python wheels. Apache Thrift has the following license: -Copyright 2015 Microsoft Corporation. All rights reserved. +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -34206,80 +33438,23 @@ limitations under the License. 
-------------------------------------------------------------------------------- -This project includes code from the jemalloc project - -https://github.com/jemalloc/jemalloc - -Copyright (C) 2002-2017 Jason Evans . -All rights reserved. -Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice(s), - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice(s), - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- - -This project includes code from the Go project, BSD 3-clause license + PATENTS -weak patent termination clause -(https://github.com/golang/go/blob/master/PATENTS). - -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- +3rdparty dependency Apache ORC is statically linked in certain binary +distributions, like the python wheels. Apache ORC has the following license: -This project includes code from the hs2client +Apache ORC +Copyright 2013-2019 The Apache Software Foundation -https://github.com/cloudera/hs2client +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). -Copyright 2016 Cloudera Inc. 
+This product includes software developed by Hewlett-Packard: +(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -34289,86 +33464,76 @@ limitations under the License. -------------------------------------------------------------------------------- -The script ci/scripts/util_wait_for_it.sh has the following license - -Copyright (c) 2016 Giles Hall - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: +3rdparty dependency zstd is statically linked in certain binary +distributions, like the python wheels. ZSTD has the following license: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +BSD License -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+For Zstandard software --------------------------------------------------------------------------------- +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. -The script r/configure has the following license (MIT) +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: -Copyright (c) 2017, Jeroen Ooms and Jim Hester + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and -cpp/src/arrow/util/logging-test.cc are adapted from -Ray Project (https://github.com/ray-project/ray) (Apache 2.0). +3rdparty dependency lz4 is statically linked in certain binary +distributions, like the python wheels. lz4 has the following license: -Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray) +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - http://www.apache.org/licenses/LICENSE-2.0 +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
-Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h, -cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h, -cpp/src/arrow/vendored/datetime/ios.mm, -cpp/src/arrow/vendored/datetime/tz.cpp are adapted from -Howard Hinnant's date library (https://github.com/HowardHinnant/date) -It is licensed under MIT license. 
-The MIT License (MIT) -Copyright (c) 2015, 2016, 2017 Howard Hinnant -Copyright (c) 2016 Adrian Colomitchi -Copyright (c) 2017 Florian Dang -Copyright (c) 2017 Paul Thompson -Copyright (c) 2018 Tomasz Kamiński +3rdparty dependency Brotli is statically linked in certain binary +distributions, like the python wheels. Brotli has the following license: + +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -34377,124 +33542,131 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
-------------------------------------------------------------------------------- -The file cpp/src/arrow/util/utf8.h includes code adapted from the page - https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ -with the following license (MIT) +3rdparty dependency snappy is statically linked in certain binary +distributions, like the python wheels. snappy has the following license: -Copyright (c) 2008-2009 Bjoern Hoehrmann +Copyright 2011, Google Inc. +All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Google Inc. nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- +=== -The file cpp/src/arrow/vendored/string_view.hpp has the following license +Some of the benchmark data in testdata/ is licensed differently: -Boost Software License - Version 1.0 - August 17th, 2003 + - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and + is licensed under the Creative Commons Attribution 3.0 license + (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ + for more information. 
-Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: + - kppkn.gtb is taken from the Gaviota chess tablebase set, and + is licensed under the MIT License. See + https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 + for more information. -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. + - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper + “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA + Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, + which is licensed under the CC-BY license. See + http://www.ploscompbiol.org/static/license for more ifnormation. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. + - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project + Gutenberg. 
The first three have expired copyrights and are in the public + domain; the latter does not have expired copyright, but is still in the + public domain according to the license information + (http://www.gutenberg.org/ebooks/53). -------------------------------------------------------------------------------- -The files in cpp/src/arrow/vendored/xxhash/ have the following license -(BSD 2-Clause License) +3rdparty dependency gflags is statically linked in certain binary +distributions, like the python wheels. gflags has the following license: -xxHash Library -Copyright (c) 2012-2014, Yann Collet +Copyright (c) 2006, Google Inc. All rights reserved. -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. -You can contact the author at : -- xxHash homepage: http://www.xxhash.com -- xxHash source repository : https://github.com/Cyan4973/xxHash +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- -The files in cpp/src/arrow/vendored/double-conversion/ have the following license -(BSD 3-Clause License) +3rdparty dependency glog is statically linked in certain binary +distributions, like the python wheels. glog has the following license: + +Copyright (c) 2008, Google Inc. +All rights reserved. -Copyright 2006-2011, the V8 project authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. +notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -34508,285 +33680,300 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--------------------------------------------------------------------------------- -The files in cpp/src/arrow/vendored/uriparser/ have the following license -(BSD 3-Clause License) +A function gettimeofday in utilities.cc is based on -uriparser - RFC 3986 URI parsing library +http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd -Copyright (C) 2007, Weijia Song -Copyright (C) 2007, Sebastian Pipping -All rights reserved. +The license of this code is: -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: +Copyright (c) 2003-2008, Jouni Malinen and contributors +All Rights Reserved. - * Redistributions of source code must retain the above - copyright notice, this list of conditions and the following - disclaimer. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - - * Neither the name of the nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -The files under dev/tasks/conda-recipes have the following license - -BSD 3-clause license -Copyright (c) 2015-2018, conda-forge -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/utf8cpp/ have the following license - -Copyright 2006 Nemanja Trifunovic - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. -This project includes code from Apache Kudu. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. - * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake +3. Neither the name(s) of the above-listed copyright holder(s) nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. -Copyright: 2016 The Apache Software Foundation. -Home page: https://kudu.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- -This project includes code from Apache Impala (incubating), formerly -Impala. The Impala code and rights were donated to the ASF as part of the -Incubator process after the initial code imports into Apache Parquet. - -Copyright: 2012 Cloudera, Inc. -Copyright: 2016 The Apache Software Foundation. -Home page: http://impala.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 +3rdparty dependency re2 is statically linked in certain binary +distributions, like the python wheels. re2 has the following license: --------------------------------------------------------------------------------- +Copyright (c) 2009 The RE2 Authors. All rights reserved. -This project includes code from Apache Aurora. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -* dev/release/{release,changelog,release-candidate} are based on the scripts from - Apache Aurora + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. -Copyright: 2016 The Apache Software Foundation. -Home page: https://aurora.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -This project includes code from the Google styleguide. - -* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. - -Copyright: 2009 Google Inc. All rights reserved. -Homepage: https://github.com/google/styleguide -License: 3-clause BSD +3rdparty dependency c-ares is statically linked in certain binary +distributions, like the python wheels. c-ares has the following license: --------------------------------------------------------------------------------- +# c-ares license -This project includes code from Snappy. +Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS +file. -* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code - from Google's Snappy project. +Copyright 1998 by the Massachusetts Institute of Technology. -Copyright: 2009 Google Inc. All rights reserved. -Homepage: https://github.com/google/snappy -License: 3-clause BSD +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, provided that +the above copyright notice appear in all copies and that both that copyright +notice and this permission notice appear in supporting documentation, and that +the name of M.I.T. not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior permission. +M.I.T. 
makes no representations about the suitability of this software for any +purpose. It is provided "as is" without express or implied warranty. -------------------------------------------------------------------------------- -This project includes code from the manylinux project. - -* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, - requirements.txt} are based on code from the manylinux project. - -Copyright: 2016 manylinux -Homepage: https://github.com/pypa/manylinux -License: The MIT License (MIT) - --------------------------------------------------------------------------------- +3rdparty dependency zlib is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. In the future +this will likely change to static linkage. zlib has the following license: -This project includes code from the cymove project: +zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 -* python/pyarrow/includes/common.pxd includes code from the cymove project + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler -The MIT License (MIT) -Copyright (c) 2019 Omer Ozarslan + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE -OR OTHER DEALINGS IN THE SOFTWARE. + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu -------------------------------------------------------------------------------- -The projects includes code from the Ursabot project under the dev/archery -directory. 
+3rdparty dependency openssl is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. openssl +preceding version 3 has the following license: -License: BSD 2-Clause + LICENSE ISSUES + ============== -Copyright 2019 RStudio, Inc. + The OpenSSL toolkit stays under a double license, i.e. both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: + OpenSSL License + --------------- -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. 
Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + Original SSLeay License + ----------------------- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ -------------------------------------------------------------------------------- -This project include code from CMake. 
+This project includes code from the rtools-backports project. -* cpp/cmake_modules/FindGTest.cmake is based on code from CMake. +* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code + from the rtools-backports project. -Copyright: Copyright 2000-2019 Kitware, Inc. and Contributors -Homepage: https://gitlab.kitware.com/cmake/cmake +Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. +All rights reserved. +Homepage: https://github.com/r-windows/rtools-backports License: 3-clause BSD -------------------------------------------------------------------------------- -This project include code from mingw-w64. - -* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 - -Copyright (c) 2009 - 2013 by the mingw-w64 project -Homepage: https://mingw-w64.org -License: Zope Public License (ZPL) Version 2.1. - ---------------------------------------------------------------------------------- - -This project include code from Google's Asylo project. - -* cpp/src/arrow/result.h is based on status_or.h - -Copyright (c) Copyright 2017 Asylo authors -Homepage: https://asylo.dev/ -License: Apache 2.0 - --------------------------------------------------------------------------------- +Some code from pandas has been adapted for the pyarrow codebase. pandas is +available under the 3-clause BSD license, which follows: -This project includes code from Google's protobuf project +pandas license +============== -* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN +Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team +All rights reserved. -Copyright 2008 Google Inc. All rights reserved. -Homepage: https://developers.google.com/protocol-buffers/ -License: +Copyright (c) 2008-2011 AQR Capital Management, LLC +All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * Neither the name of the copyright holder nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT @@ -34798,1430 +33985,170 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. 
- -------------------------------------------------------------------------------- -3rdparty dependency LLVM is statically linked in certain binary distributions. -Additionally some sections of source code have been derived from sources in LLVM -and have been clearly labeled as such. LLVM has the following license: +Some bits from DyND, in particular aspects of the build system, have been +adapted from libdynd and dynd-python under the terms of the BSD 2-clause +license -============================================================================== -LLVM Release License -============================================================================== -University of Illinois/NCSA -Open Source License +The BSD 2-Clause License -Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign. -All rights reserved. + Copyright (C) 2011-12, Dynamic NDArray Developers + All rights reserved. -Developed by: + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: - LLVM Team + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - University of Illinois at Urbana-Champaign + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. - http://llvm.org + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal with -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: +Dynamic NDArray Developers list: - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimers. + * Mark Wiebe + * Continuum Analytics - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. +-------------------------------------------------------------------------------- - * Neither the names of the LLVM Team, University of Illinois at - Urbana-Champaign, nor the names of its contributors may be used to - endorse or promote products derived from this Software without specific - prior written permission. +Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted +for PyArrow. Ibis is released under the Apache License, Version 2.0. 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE -SOFTWARE. +-------------------------------------------------------------------------------- -============================================================================== -Copyrights and Licenses for Third Party Software Distributed with LLVM: -============================================================================== -The LLVM software contains code written by third parties. Such software will -have its own individual LICENSE.TXT file in the directory in which it appears. -This file will describe the copyrights, license, and restrictions which apply -to that code. +dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: -The disclaimer of warranty in the University of Illinois Open Source License -applies to all code in the LLVM Distribution, and nothing in any of the -other licenses gives permission to use the names of the LLVM Team or the -University of Illinois to endorse or promote products derived from this -Software. 
- -The following pieces of software have additional or alternate copyrights, -licenses, and/or restrictions: - -Program Directory -------- --------- -Google Test llvm/utils/unittest/googletest -OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex} -pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT} -ARM contributions llvm/lib/Target/ARM/LICENSE.TXT -md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h - --------------------------------------------------------------------------------- - -3rdparty dependency gRPC is statically linked in certain binary -distributions, like the python wheels. gRPC has the following license: - -Copyright 2014 gRPC authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -3rdparty dependency Apache Thrift is statically linked in certain binary -distributions, like the python wheels. Apache Thrift has the following license: - -Apache Thrift -Copyright (C) 2006 - 2019, The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -3rdparty dependency Apache ORC is statically linked in certain binary -distributions, like the python wheels. Apache ORC has the following license: - -Apache ORC -Copyright 2013-2019 The Apache Software Foundation - -This product includes software developed by The Apache Software -Foundation (http://www.apache.org/). - -This product includes software developed by Hewlett-Packard: -(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -3rdparty dependency zstd is statically linked in certain binary -distributions, like the python wheels. ZSTD has the following license: - -BSD License - -For Zstandard software - -Copyright (c) 2016-present, Facebook, Inc. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency lz4 is statically linked in certain binary -distributions, like the python wheels. lz4 has the following license: +BSD 2-Clause License -LZ4 Library -Copyright (c) 2011-2016, Yann Collet +Copyright (c) 2009-present, Homebrew contributors All rights reserved. 
-Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency Brotli is statically linked in certain binary -distributions, like the python wheels. Brotli has the following license: - -Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - --------------------------------------------------------------------------------- - -3rdparty dependency snappy is statically linked in certain binary -distributions, like the python wheels. snappy has the following license: - -Copyright 2011, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Google Inc. 
nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -=== - -Some of the benchmark data in testdata/ is licensed differently: - - - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and - is licensed under the Creative Commons Attribution 3.0 license - (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ - for more information. - - - kppkn.gtb is taken from the Gaviota chess tablebase set, and - is licensed under the MIT License. See - https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 - for more information. - - - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper - “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA - Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, - which is licensed under the CC-BY license. See - http://www.ploscompbiol.org/static/license for more ifnormation. - - - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project - Gutenberg. 
The first three have expired copyrights and are in the public - domain; the latter does not have expired copyright, but is still in the - public domain according to the license information - (http://www.gutenberg.org/ebooks/53). - --------------------------------------------------------------------------------- - -3rdparty dependency gflags is statically linked in certain binary -distributions, like the python wheels. gflags has the following license: - -Copyright (c) 2006, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------------------- - -3rdparty dependency glog is statically linked in certain binary -distributions, like the python wheels. glog has the following license: - -Copyright (c) 2008, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -A function gettimeofday in utilities.cc is based on - -http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd - -The license of this code is: - -Copyright (c) 2003-2008, Jouni Malinen and contributors -All Rights Reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name(s) of the above-listed copyright holder(s) nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency re2 is statically linked in certain binary -distributions, like the python wheels. re2 has the following license: - -Copyright (c) 2009 The RE2 Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - * Neither the name of Google Inc. nor the names of its contributors - may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency c-ares is statically linked in certain binary -distributions, like the python wheels. c-ares has the following license: - -# c-ares license - -Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS -file. - -Copyright 1998 by the Massachusetts Institute of Technology. 
- -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose and without fee is hereby granted, provided that -the above copyright notice appear in all copies and that both that copyright -notice and this permission notice appear in supporting documentation, and that -the name of M.I.T. not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior permission. -M.I.T. makes no representations about the suitability of this software for any -purpose. It is provided "as is" without express or implied warranty. - --------------------------------------------------------------------------------- - -3rdparty dependency zlib is redistributed as a dynamically linked shared -library in certain binary distributions, like the python wheels. In the future -this will likely change to static linkage. zlib has the following license: - -zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.11, January 15th, 2017 - - Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. 
- - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - --------------------------------------------------------------------------------- - -3rdparty dependency openssl is redistributed as a dynamically linked shared -library in certain binary distributions, like the python wheels. openssl -preceding version 3 has the following license: - - LICENSE ISSUES - ============== - - The OpenSSL toolkit stays under a double license, i.e. both the conditions of - the OpenSSL License and the original SSLeay license apply to the toolkit. - See below for the actual license texts. - - OpenSSL License - --------------- - -/* ==================================================================== - * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. 
- * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ - - Original SSLeay License - ----------------------- - -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. 
The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ - --------------------------------------------------------------------------------- - -This project includes code from the rtools-backports project. - -* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code - from the rtools-backports project. - -Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. -All rights reserved. -Homepage: https://github.com/r-windows/rtools-backports -License: 3-clause BSD - --------------------------------------------------------------------------------- - -Some code from pandas has been adapted for the pyarrow codebase. pandas is -available under the 3-clause BSD license, which follows: - -pandas license -============== - -Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team -All rights reserved. - -Copyright (c) 2008-2011 AQR Capital Management, LLC -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the copyright holder nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -Some bits from DyND, in particular aspects of the build system, have been -adapted from libdynd and dynd-python under the terms of the BSD 2-clause -license - -The BSD 2-Clause License - - Copyright (C) 2011-12, Dynamic NDArray Developers - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Dynamic NDArray Developers list: - - * Mark Wiebe - * Continuum Analytics - --------------------------------------------------------------------------------- - -Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted -for PyArrow. Ibis is released under the Apache License, Version 2.0. - --------------------------------------------------------------------------------- - -dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: - -BSD 2-Clause License - -Copyright (c) 2009-present, Homebrew contributors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. 
- -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- - -cpp/src/arrow/vendored/base64.cpp has the following license - -ZLIB License - -Copyright (C) 2004-2017 René Nyffenegger - -This source code is provided 'as-is', without any express or implied -warranty. In no event will the author be held liable for any damages arising -from the use of this software. - -Permission is granted to anyone to use this software for any purpose, including -commercial applications, and to alter it and redistribute it freely, subject to -the following restrictions: - -1. The origin of this source code must not be misrepresented; you must not - claim that you wrote the original source code. 
If you use this source code - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - -2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original source code. - -3. This notice may not be removed or altered from any source distribution. - -René Nyffenegger rene.nyffenegger@adp-gmbh.ch - --------------------------------------------------------------------------------- - -The file cpp/src/arrow/vendored/optional.hpp has the following license - -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
- --------------------------------------------------------------------------------- - -The file cpp/src/arrow/vendored/musl/strptime.c has the following license - -Copyright © 2005-2020 Rich Felker, et al. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/apache/thrift -Version: v0.19.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.19.0/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. 
- --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -*/ -(By Douglas Crockford ) - --------------------------------------------------- -For lib/cpp/src/thrift/windows/SocketPair.cpp - -/* socketpair.c - * Copyright 2007 by Nathan C. Myers ; some rights reserved. - * This code is Free Software. It may be copied freely, in original or - * modified form, subject only to the restrictions that (1) the author is - * relieved from all responsibilities for any use for any purpose, and (2) - * this copyright notice must be retained, unchanged, in its entirety. If - * for any reason the author might be held responsible for any consequences - * of copying or use, license is withheld. - */ - - --------------------------------------------------- -For lib/py/compat/win32/stdint.h - -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. 
Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - - --------------------------------------------------- -Codegen template in t_html_generator.h - -* Bootstrap v2.0.3 -* -* Copyright 2012 Twitter, Inc -* Licensed under the Apache License v2.0 -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Designed and built with all the love in the world @twitter by @mdo and @fat. 
- ---------------------------------------------------- -For t_cl_generator.cc - - * Copyright (c) 2008- Patrick Collison - * Copyright (c) 2006- Facebook - ---------------------------------------------------- - - --------------------------------------------------------------------------------- -Dependency : github.com/poy/eachers -Version: v0.0.0-20181020210610-23942921fe77 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/poy/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md: - -The MIT License (MIT) - -Copyright (c) 2016 Andrew Poydence - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/armon/go-radix -Version: v1.0.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/armon/go-radix@v1.0.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream -Version: v1.6.4 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream@v1.6.4/LICENSE.txt: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +---------------------------------------------------------------------- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +cpp/src/arrow/vendored/base64.cpp has the following license - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +ZLIB License - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +Copyright (C) 2004-2017 René Nyffenegger - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +This source code is provided 'as-is', without any express or implied +warranty. In no event will the author be held liable for any damages arising +from the use of this software. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +Permission is granted to anyone to use this software for any purpose, including +commercial applications, and to alter it and redistribute it freely, subject to +the following restrictions: - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and +1. The origin of this source code must not be misrepresented; you must not + claim that you wrote the original source code. If you use this source code + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original source code. 
- (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +3. This notice may not be removed or altered from any source distribution. - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +René Nyffenegger rene.nyffenegger@adp-gmbh.ch - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. 
+-------------------------------------------------------------------------------- - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +The file cpp/src/arrow/vendored/optional.hpp has the following license - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +Boost Software License - Version 1.0 - August 17th, 2003 - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. 
+Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. - END OF TERMS AND CONDITIONS +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
- APPENDIX: How to apply the Apache License to your work. +-------------------------------------------------------------------------------- - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +The file cpp/src/arrow/vendored/musl/strptime.c has the following license - Copyright [yyyy] [name of copyright owner] +Copyright © 2005-2020 Rich Felker, et al. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: - http://www.apache.org/licenses/LICENSE-2.0 +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/internal/configsources -Version: v1.3.16 +Dependency : github.com/apache/thrift +Version: v0.19.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.16/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.19.0/LICENSE: Apache License @@ -36426,226 +34353,179 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/inter See the License for the specific language governing permissions and limitations under the License. +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: --------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -Version: v2.6.16 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. 
-Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.16/LICENSE.txt: +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + lib/erl/src/Makefile.am - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Please see doc/otp-base-license.txt for the full terms of this license. - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - 1. Definitions. +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +*/ +(By Douglas Crockford ) - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
+-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +-------------------------------------------------- +For lib/py/compat/win32/stdint.h - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
+// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +/////////////////////////////////////////////////////////////////////////////// - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +-------------------------------------------------- +Codegen template in t_html_generator.h - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +--------------------------------------------------- +For t_cl_generator.cc - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +--------------------------------------------------- - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +-------------------------------------------------------------------------------- +Dependency : github.com/poy/eachers +Version: v0.0.0-20181020210610-23942921fe77 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +Contents of probable licence file $GOMODCACHE/github.com/poy/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md: - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +The MIT License (MIT) - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +Copyright (c) 2016 Andrew Poydence - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - 7. 
Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - 9. Accepting Warranty or Additional Liability. 
While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS +-------------------------------------------------------------------------------- +Dependency : github.com/armon/go-radix +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - APPENDIX: How to apply the Apache License to your work. +Contents of probable licence file $GOMODCACHE/github.com/armon/go-radix@v1.0.0/LICENSE: - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +The MIT License (MIT) - Copyright [yyyy] [name of copyright owner] +Copyright (c) 2014 Armon Dadgar - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: - http://www.apache.org/licenses/LICENSE-2.0 +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/internal/ini -Version: v1.8.1 +Dependency : github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream +Version: v1.6.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/ini@v1.8.1/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream@v1.6.4/LICENSE.txt: Apache License @@ -36852,12 +34732,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/inter -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/internal/v4a +Dependency : github.com/aws/aws-sdk-go-v2/internal/configsources Version: v1.3.16 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/v4a@v1.3.16/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.16/LICENSE.txt: Apache License @@ -37064,12 +34944,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/inter -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -Version: v1.11.4 +Dependency : github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 +Version: v2.6.16 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding@v1.11.4/LICENSE.txt: +Contents of probable licence file 
$GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.16/LICENSE.txt: Apache License @@ -37276,12 +35156,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/internal/checksum -Version: v1.3.18 +Dependency : github.com/aws/aws-sdk-go-v2/internal/ini +Version: v1.8.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/checksum@v1.3.18/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/ini@v1.8.1/LICENSE.txt: Apache License @@ -37488,12 +35368,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -Version: v1.11.18 +Dependency : github.com/aws/aws-sdk-go-v2/internal/v4a +Version: v1.3.16 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url@v1.11.18/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/v4a@v1.3.16/LICENSE.txt: Apache License @@ -37700,12 +35580,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/internal/s3shared -Version: v1.17.16 +Dependency : github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding +Version: v1.11.4 Licence type (autodetected): Apache-2.0 
-------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/s3shared@v1.17.16/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding@v1.11.4/LICENSE.txt: Apache License @@ -37912,12 +35792,436 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/sso -Version: v1.22.5 +Dependency : github.com/aws/aws-sdk-go-v2/service/internal/checksum +Version: v1.3.18 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/sso@v1.22.5/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/checksum@v1.3.18/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go-v2/service/internal/presigned-url +Version: v1.11.18 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url@v1.11.18/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go-v2/service/internal/s3shared +Version: v1.17.16 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/internal/s3shared@v1.17.16/LICENSE.txt: Apache License @@ -38124,12 +36428,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/ssooidc -Version: v1.26.5 +Dependency : github.com/aws/aws-sdk-go-v2/service/sso +Version: v1.22.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/ssooidc@v1.26.5/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/sso@v1.22.5/LICENSE.txt: Apache License @@ -38336,134 +36640,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/benbjohnson/clock -Version: v1.3.0 -Licence type 
(autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/benbjohnson/clock@v1.3.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/beorn7/perks -Version: v1.0.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/beorn7/perks@v1.0.1/LICENSE: - -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/bluekeyes/go-gitdiff -Version: v0.7.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/bluekeyes/go-gitdiff@v0.7.1/LICENSE: - -MIT License - -Copyright (c) 2019 Billy Keyes - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/cenkalti/backoff/v4 -Version: v4.3.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/cenkalti/backoff/v4@v4.3.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/census-instrumentation/opencensus-proto -Version: v0.4.1 +Dependency : github.com/aws/aws-sdk-go-v2/service/ssooidc +Version: v1.26.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/census-instrumentation/opencensus-proto@v0.4.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/ssooidc@v1.26.5/LICENSE.txt: Apache License @@ -38670,18 +36852,16 @@ Contents of probable licence file $GOMODCACHE/github.com/census-instrumentation/ -------------------------------------------------------------------------------- -Dependency : github.com/cilium/ebpf -Version: v0.13.2 +Dependency : github.com/benbjohnson/clock +Version: v1.3.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/cilium/ebpf@v0.13.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/benbjohnson/clock@v1.3.0/LICENSE: -MIT License +The MIT License (MIT) -Copyright (c) 2017 Nathan Sweet -Copyright (c) 2018, 2019 Cloudflare -Copyright (c) 2019 Authors of Cilium +Copyright (c) 2014 Ben Johnson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -38703,214 +36883,127 @@ SOFTWARE. 
-------------------------------------------------------------------------------- -Dependency : github.com/cncf/xds/go -Version: v0.0.0-20240905190251-b4127c9b8d78 -Licence type (autodetected): Apache-2.0 +Dependency : github.com/beorn7/perks +Version: v1.0.1 +Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/cncf/xds/go@v0.0.0-20240905190251-b4127c9b8d78/LICENSE: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +Contents of probable licence file $GOMODCACHE/github.com/beorn7/perks@v1.0.1/LICENSE: - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +Copyright (C) 2013 Blake Mizerany - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
+Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +-------------------------------------------------------------------------------- +Dependency : github.com/bluekeyes/go-gitdiff +Version: v0.7.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +Contents of probable licence file $GOMODCACHE/github.com/bluekeyes/go-gitdiff@v0.7.1/LICENSE: - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +MIT License - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +Copyright (c) 2019 Billy Keyes - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +-------------------------------------------------------------------------------- +Dependency : github.com/cenkalti/backoff/v4 +Version: v4.3.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - 
documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +Contents of probable licence file $GOMODCACHE/github.com/cenkalti/backoff/v4@v4.3.0/LICENSE: - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +The MIT License (MIT) - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +Copyright (c) 2014 Cenk Altı - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. 
+Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS +-------------------------------------------------------------------------------- +Dependency : github.com/cilium/ebpf +Version: v0.13.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - APPENDIX: How to apply the Apache License to your work. +Contents of probable licence file $GOMODCACHE/github.com/cilium/ebpf@v0.13.2/LICENSE: - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +MIT License - Copyright [yyyy] [name of copyright owner] +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - http://www.apache.org/licenses/LICENSE-2.0 +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
-------------------------------------------------------------------------------- @@ -39628,470 +37721,15 @@ Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0. -------------------------------------------------------------------------------- -Dependency : github.com/dgryski/go-farm -Version: v0.0.0-20200201041132-a6ae2369ad13 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/dgryski/go-farm@v0.0.0-20200201041132-a6ae2369ad13/LICENSE: - -Copyright (c) 2014-2017 Damian Gryski -Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- - - --------------------------------------------------------------------------------- -Dependency : github.com/dimchansky/utfbom -Version: v1.1.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/dimchansky/utfbom@v1.1.0/LICENSE: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : github.com/distribution/reference -Version: v0.6.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/distribution/reference@v0.6.0/LICENSE: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - - --------------------------------------------------------------------------------- -Dependency : github.com/dlclark/regexp2 -Version: v1.4.0 +Dependency : github.com/dgryski/go-farm +Version: v0.0.0-20200201041132-a6ae2369ad13 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dlclark/regexp2@v1.4.0/LICENSE: - -The MIT License (MIT) +Contents of probable licence file $GOMODCACHE/github.com/dgryski/go-farm@v0.0.0-20200201041132-a6ae2369ad13/LICENSE: -Copyright (c) Doug Clark +Copyright (c) 2014-2017 Damian Gryski +Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -40100,68 +37738,30 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/dnephin/pflag -Version: v1.0.7 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/dnephin/pflag@v1.0.7/LICENSE: - -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : github.com/docker/go-metrics -Version: v0.0.1 +Dependency : github.com/dimchansky/utfbom +Version: v1.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0.1/LICENSE: - +Contents of probable licence file $GOMODCACHE/github.com/dimchansky/utfbom@v1.1.0/LICENSE: Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -40336,13 +37936,24 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0. END OF TERMS AND CONDITIONS - Copyright 2013-2016 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -40352,75 +37963,14 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0. -------------------------------------------------------------------------------- -Dependency : github.com/eapache/go-xerial-snappy -Version: v0.0.0-20180814174437-776d5712da21 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/eapache/go-xerial-snappy@v0.0.0-20180814174437-776d5712da21/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2016 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/eapache/queue -Version: v1.1.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/eapache/queue@v1.1.0/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- --------------------------------------------------------------------------------- -Dependency : github.com/elastic/elastic-transport-go/v8 -Version: v8.6.0 +Dependency : github.com/distribution/reference +Version: v0.6.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transport-go/v8@v8.6.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/distribution/reference@v0.6.0/LICENSE: - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -40600,7 +38150,7 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transpo APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -40608,7 +38158,7 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transpo same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -40623,18 +38173,88 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transpo limitations under the License. 
+ -------------------------------------------------------------------------------- -Dependency : github.com/elastic/go-windows -Version: v1.0.2 +Dependency : github.com/dlclark/regexp2 +Version: v1.4.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dlclark/regexp2@v1.4.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) Doug Clark + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/dnephin/pflag +Version: v1.0.7 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dnephin/pflag@v1.0.7/LICENSE: + +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/docker/go-metrics +Version: v0.0.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0.1/LICENSE: Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -40809,24 +38429,13 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0 END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2013-2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -40836,16 +38445,16 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0 -------------------------------------------------------------------------------- -Dependency : github.com/elastic/pkcs8 -Version: v1.0.0 +Dependency : github.com/eapache/go-xerial-snappy +Version: v0.0.0-20180814174437-776d5712da21 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/pkcs8@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/eapache/go-xerial-snappy@v0.0.0-20180814174437-776d5712da21/LICENSE: The MIT License (MIT) -Copyright (c) 2014 youmark +Copyright (c) 2016 Evan Huus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -40865,118 +38474,44 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/elazarl/goproxy -Version: v0.0.0-20240909085733-6741dbfc16a1 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elazarl/goproxy@v0.0.0-20240909085733-6741dbfc16a1/LICENSE: - -Copyright (c) 2012 Elazar Leibovich. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Elazar Leibovich. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------------------- -Dependency : github.com/elazarl/goproxy/ext -Version: v0.0.0-20240909085733-6741dbfc16a1 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elazarl/goproxy/ext@v0.0.0-20240909085733-6741dbfc16a1/LICENSE: - -Copyright (c) 2012 Elazar Leibovich. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Elazar Leibovich. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/emicklei/go-restful/v3 -Version: v3.11.0 +Dependency : github.com/eapache/queue +Version: v1.1.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/emicklei/go-restful/v3@v3.11.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/eapache/queue@v1.1.0/LICENSE: -Copyright (c) 2012,2013 Ernest Micklei +The MIT License (MIT) -MIT License +Copyright (c) 2014 Evan Huus -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/envoyproxy/go-control-plane -Version: v0.13.0 +Dependency : github.com/elastic/elastic-transport-go/v8 +Version: v8.6.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/go-control-plane@v0.13.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transport-go/v8@v8.6.0/LICENSE: Apache License Version 2.0, January 2004 @@ -41158,7 +38693,7 @@ Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/go-control-p APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. 
We also recommend that a @@ -41166,7 +38701,7 @@ Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/go-control-p same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -41182,12 +38717,12 @@ Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/go-control-p -------------------------------------------------------------------------------- -Dependency : github.com/envoyproxy/protoc-gen-validate -Version: v1.1.0 +Dependency : github.com/elastic/go-windows +Version: v1.0.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/protoc-gen-validate@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0.2/LICENSE.txt: Apache License @@ -41393,6 +38928,141 @@ Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/protoc-gen-v limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : github.com/elastic/pkcs8 +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/pkcs8@v1.0.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 youmark + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +Dependency : github.com/elazarl/goproxy +Version: v0.0.0-20240909085733-6741dbfc16a1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elazarl/goproxy@v0.0.0-20240909085733-6741dbfc16a1/LICENSE: + +Copyright (c) 2012 Elazar Leibovich. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Elazar Leibovich. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elazarl/goproxy/ext +Version: v0.0.0-20240909085733-6741dbfc16a1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elazarl/goproxy/ext@v0.0.0-20240909085733-6741dbfc16a1/LICENSE: + +Copyright (c) 2012 Elazar Leibovich. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Elazar Leibovich. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/emicklei/go-restful/v3 +Version: v3.11.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/emicklei/go-restful/v3@v3.11.0/LICENSE: + +Copyright (c) 2012,2013 Ernest Micklei + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + -------------------------------------------------------------------------------- Dependency : github.com/evanphx/json-patch Version: v5.6.0+incompatible @@ -45416,11 +43086,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/googleapis/enterprise-certificate-proxy -Version: v0.3.4 +Version: v0.3.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/enterprise-certificate-proxy@v0.3.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/googleapis/enterprise-certificate-proxy@v0.3.2/LICENSE: Apache License @@ -53836,45 +51506,6 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : github.com/planetscale/vtprotobuf -Version: v0.6.1-0.20240319094008-0393e58bdf10 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/planetscale/vtprotobuf@v0.6.1-0.20240319094008-0393e58bdf10/LICENSE: - -Copyright (c) 2021, PlanetScale Inc. All rights reserved. -Copyright (c) 2013, The GoGo Authors. All rights reserved. -Copyright (c) 2018 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : github.com/pmezard/go-difflib Version: v1.0.1-0.20181226105442-5d4384ee4fb2 @@ -56548,436 +54179,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector@v0.1 -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/collector/config/configtelemetry -Version: v0.109.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/config/configtelemetry@v0.109.0/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/collector/consumer/consumerprofiles -Version: v0.109.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/consumer/consumerprofiles@v0.109.0/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - --------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/collector/consumer/consumertest +Dependency : go.opentelemetry.io/collector/config/configtelemetry Version: v0.109.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/consumer/consumertest@v0.109.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/config/configtelemetry@v0.109.0/LICENSE: Apache License @@ -57184,12 +54391,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/cons -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/collector/pdata/pprofile +Dependency : go.opentelemetry.io/collector/consumer/consumerprofiles Version: v0.109.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdata/pprofile@v0.109.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/consumer/consumerprofiles@v0.109.0/LICENSE: Apache License @@ -57396,12 +54603,13 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdat -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/contrib/detectors/gcp -Version: v1.29.0 +Dependency : go.opentelemetry.io/collector/consumer/consumertest +Version: v0.109.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/detectors/gcp@v1.29.0/LICENSE: +Contents of probable licence file 
$GOMODCACHE/go.opentelemetry.io/collector/consumer/consumertest@v0.109.0/LICENSE: + Apache License Version 2.0, January 2004 @@ -57607,12 +54815,13 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/detect -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc -Version: v0.54.0 +Dependency : go.opentelemetry.io/collector/pdata/pprofile +Version: v0.109.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.54.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdata/pprofile@v0.109.0/LICENSE: + Apache License Version 2.0, January 2004 @@ -57818,12 +55027,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instru -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp -Version: v0.54.0 +Dependency : go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +Version: v0.49.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp@v0.54.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.49.0/LICENSE: Apache License Version 2.0, January 2004 @@ -58029,12 +55238,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instru -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel -Version: v1.29.0 +Dependency : 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +Version: v0.53.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.29.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp@v0.53.0/LICENSE: Apache License Version 2.0, January 2004 @@ -58240,12 +55449,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.29.0/L -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/exporters/otlp/otlptrace -Version: v1.28.0 +Dependency : go.opentelemetry.io/otel +Version: v1.29.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.28.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.29.0/LICENSE: Apache License Version 2.0, January 2004 @@ -58451,12 +55660,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp +Dependency : go.opentelemetry.io/otel/exporters/otlp/otlptrace Version: v1.28.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp@v1.28.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.28.0/LICENSE: Apache License Version 2.0, January 2004 @@ -58662,12 +55871,12 @@ Contents of probable licence file 
$GOMODCACHE/go.opentelemetry.io/otel/exporters -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/metric -Version: v1.29.0 +Dependency : go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp +Version: v1.28.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1.29.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp@v1.28.0/LICENSE: Apache License Version 2.0, January 2004 @@ -58873,12 +56082,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1 -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/sdk +Dependency : go.opentelemetry.io/otel/metric Version: v1.29.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk@v1.29.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1.29.0/LICENSE: Apache License Version 2.0, January 2004 @@ -59084,12 +56293,12 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk@v1.29 -------------------------------------------------------------------------------- -Dependency : go.opentelemetry.io/otel/sdk/metric +Dependency : go.opentelemetry.io/otel/sdk Version: v1.29.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk/metric@v1.29.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk@v1.29.0/LICENSE: Apache License Version 2.0, January 2004 @@ -59882,13 +57091,13 @@ 
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/xerrors -Version: v0.0.0-20240903120638-7835f813f4da +Version: v0.0.0-20231012003039-104605ab7028 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/xerrors@v0.0.0-20240903120638-7835f813f4da/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/xerrors@v0.0.0-20231012003039-104605ab7028/LICENSE: -Copyright 2019 The Go Authors. +Copyright (c) 2019 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -59900,7 +57109,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google LLC nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -59951,11 +57160,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : google.golang.org/genproto -Version: v0.0.0-20240903143218-8af14fe29dc1 +Version: v0.0.0-20240730163845-b1a4ccb954bf Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20240903143218-8af14fe29dc1/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20240730163845-b1a4ccb954bf/LICENSE: Apache License @@ -60163,223 +57372,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0- -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto/googleapis/rpc -Version: v0.0.0-20240903143218-8af14fe29dc1 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/rpc@v0.0.0-20240903143218-8af14fe29dc1/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : google.golang.org/grpc/stats/opentelemetry -Version: v0.0.0-20240907200651-3ffb98b2c93a +Version: v0.0.0-20240822170219-fc7c04adadcd Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/grpc/stats/opentelemetry@v0.0.0-20240907200651-3ffb98b2c93a/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/rpc@v0.0.0-20240822170219-fc7c04adadcd/LICENSE: Apache License diff --git a/go.mod b/go.mod index 0f3c26503ca9..75aba4602368 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/elastic/beats/v7 go 1.22.0 require ( - cloud.google.com/go/bigquery v1.63.1 - cloud.google.com/go/monitoring v1.21.0 - cloud.google.com/go/pubsub v1.42.0 + cloud.google.com/go/bigquery v1.62.0 + cloud.google.com/go/monitoring v1.20.4 + cloud.google.com/go/pubsub v1.41.0 code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee // indirect code.cloudfoundry.org/go-loggregator v7.4.0+incompatible code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a // indirect @@ -137,15 +137,15 @@ require ( golang.org/x/crypto v0.27.0 golang.org/x/mod v0.21.0 golang.org/x/net v0.29.0 - golang.org/x/oauth2 v0.23.0 + golang.org/x/oauth2 
v0.22.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.25.0 golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.25.0 - google.golang.org/api v0.197.0 - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc v1.66.2 + google.golang.org/api v0.191.0 + google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect + google.golang.org/grpc v1.66.0 google.golang.org/protobuf v1.34.2 gopkg.in/inf.v0 v0.9.1 gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect @@ -162,9 +162,9 @@ require ( ) require ( - cloud.google.com/go v0.115.1 - cloud.google.com/go/compute v1.28.0 - cloud.google.com/go/redis v1.17.0 + cloud.google.com/go v0.115.0 + cloud.google.com/go/compute v1.27.4 + cloud.google.com/go/redis v1.16.4 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 @@ -224,18 +224,17 @@ require ( go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/pdata v1.15.0 go.opentelemetry.io/collector/receiver v0.109.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) require ( aqwari.net/xml v0.0.0-20210331023308-d9421b293817 // indirect - cel.dev/expr v0.16.1 // indirect - cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth v0.8.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.2 // indirect - cloud.google.com/go/iam v1.2.1 // indirect - cloud.google.com/go/longrunning v0.6.1 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.1.12 // indirect + cloud.google.com/go/longrunning v0.5.11 // indirect code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect 
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/azure-amqp-common-go/v4 v4.2.0 // indirect @@ -249,9 +248,6 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/andybalholm/brotli v1.0.5 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect @@ -272,9 +268,7 @@ require ( github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bluekeyes/go-gitdiff v0.7.1 // indirect - github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cilium/ebpf v0.13.2 // indirect - github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 // indirect github.com/cyphar/filepath-securejoin v0.2.5 // indirect @@ -291,8 +285,6 @@ require ( github.com/elazarl/goproxy v0.0.0-20240909085733-6741dbfc16a1 // indirect github.com/elazarl/goproxy/ext v0.0.0-20240909085733-6741dbfc16a1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/envoyproxy/go-control-plane v0.13.0 // indirect - github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/fearful-symmetry/gomsr v0.0.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -317,7 +309,7 @@ require ( github.com/google/licenseclassifier 
v0.0.0-20221004142553-c1ed8fcf4bab // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -364,7 +356,6 @@ require ( github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pierrec/lz4 v2.6.0+incompatible // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.20.2 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect @@ -384,20 +375,16 @@ require ( go.opentelemetry.io/collector/config/configtelemetry v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.109.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.29.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/ratelimit v0.3.1 // indirect golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 // indirect 
golang.org/x/term v0.24.0 // indirect - golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect @@ -410,7 +397,7 @@ require ( ) require ( - cloud.google.com/go/storage v1.44.0 + cloud.google.com/go/storage v1.43.0 github.com/PaloAltoNetworks/pango v0.10.2 github.com/dlclark/regexp2 v1.4.0 // indirect github.com/google/gofuzz v1.2.0 // indirect diff --git a/go.sum b/go.sum index 0362a16115fe..3eadfda45096 100644 --- a/go.sum +++ b/go.sum @@ -1,41 +1,35 @@ aqwari.net/xml v0.0.0-20210331023308-d9421b293817 h1:+3Rh5EaTzNLnzWx3/uy/mAaH/dGI7svJ6e0oOIDcPuE= aqwari.net/xml v0.0.0-20210331023308-d9421b293817/go.mod h1:c7kkWzc7HS/t8Q2DcVY8P2d1dyWNEhEVT5pL0ZHO11c= -cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= -cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= -cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.8.0 
h1:y8jUJLl/Fg+qNBWxP/Hox2ezJvjkrPb952PC1p0G6A4= +cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/bigquery v1.63.1 h1:/6syiWrSpardKNxdvldS5CUTRJX1iIkSPXCjLjiGL+g= -cloud.google.com/go/bigquery v1.63.1/go.mod h1:ufaITfroCk17WTqBhMpi8CRjsfHjMX07pDrQaRKKX2o= -cloud.google.com/go/compute v1.28.0 h1:OPtBxMcheSS+DWfci803qvPly3d4w7Eu5ztKBcFfzwk= -cloud.google.com/go/compute v1.28.0/go.mod h1:DEqZBtYrDnD5PvjsKwb3onnhX+qjdCVM7eshj1XdjV4= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= -cloud.google.com/go/datacatalog v1.22.1 h1:i0DyKb/o7j+0vgaFtimcRFjYsD6wFw1jpnODYUyiYRs= -cloud.google.com/go/datacatalog v1.22.1/go.mod h1:MscnJl9B2lpYlFoxRjicw19kFTwEke8ReKL5Y/6TWg8= -cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= -cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= -cloud.google.com/go/kms v1.19.0 h1:x0OVJDl6UH1BSX4THKlMfdcFWoE4ruh90ZHuilZekrU= -cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM= -cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= -cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= -cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= -cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= -cloud.google.com/go/monitoring v1.21.0 h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro= -cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4= -cloud.google.com/go/pubsub v1.42.0 
h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= -cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= -cloud.google.com/go/redis v1.17.0 h1:YItghJ0VY98gJperCaTVEe7g+QZWz1nsN5ioJcSxkDY= -cloud.google.com/go/redis v1.17.0/go.mod h1:pzTdaIhriMLiXu8nn2CgiS52SYko0tO1Du4d3MPOG5I= -cloud.google.com/go/storage v1.44.0 h1:abBzXf4UJKMmQ04xxJf9dYM/fNl24KHoTuBjyJDX2AI= -cloud.google.com/go/storage v1.44.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE= -cloud.google.com/go/trace v1.11.0 h1:UHX6cOJm45Zw/KIbqHe4kII8PupLt/V5tscZUkeiJVI= -cloud.google.com/go/trace v1.11.0/go.mod h1:Aiemdi52635dBR7o3zuc9lLjXo3BwGaChEjCa3tJNmM= +cloud.google.com/go/bigquery v1.62.0 h1:SYEA2f7fKqbSRRBHb7g0iHTtZvtPSPYdXfmqsjpsBwo= +cloud.google.com/go/bigquery v1.62.0/go.mod h1:5ee+ZkF1x/ntgCsFQJAQTM3QkAZOecfCmvxhkJsWRSA= +cloud.google.com/go/compute v1.27.4 h1:XM8ulx6crjdl09XBfji7viFgZOEQuIxBwKmjRH9Rtmc= +cloud.google.com/go/compute v1.27.4/go.mod h1:7JZS+h21ERAGHOy5qb7+EPyXlQwzshzrx1x6L9JhTqU= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/datacatalog v1.20.5 h1:Cosg/L60myEbpP1HoNv77ykV7zWe7hqSwY4uUDmhx/I= +cloud.google.com/go/datacatalog v1.20.5/go.mod h1:DB0QWF9nelpsbB0eR/tA0xbHZZMvpoFD1XFy3Qv/McI= +cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= +cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/kms v1.18.4 h1:dYN3OCsQ6wJLLtOnI8DGUwQ5shMusXsWCCC+s09ATsk= +cloud.google.com/go/kms v1.18.4/go.mod h1:SG1bgQ3UWW6/KdPo9uuJnzELXY5YTTMJtDYvajiQ22g= +cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= +cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= +cloud.google.com/go/monitoring v1.20.4 
h1:zwcViK7mT9SV0kzKqLOI3spRadvsmvw/R9z1MHNeC0E= +cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c= +cloud.google.com/go/pubsub v1.41.0 h1:ZPaM/CvTO6T+1tQOs/jJ4OEMpjtel0PTLV7j1JK+ZrI= +cloud.google.com/go/pubsub v1.41.0/go.mod h1:g+YzC6w/3N91tzG66e2BZtp7WrpBBMXVa3Y9zVoOGpk= +cloud.google.com/go/redis v1.16.4 h1:9CO6EcuM9/CpgtcjG6JZV+GFw3oDrRfwLwmvwo/uM1o= +cloud.google.com/go/redis v1.16.4/go.mod h1:unCVfLP5eFrVhGLDnb7IaSaWxuZ+7cBgwwBwbdG9m9w= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee h1:iAAPf9s7/+BIiGf+RjgcXLm3NoZaLIJsBXJuUa63Lx8= code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= code.cloudfoundry.org/go-loggregator v7.4.0+incompatible h1:KqZYloMQWM5Zg/BQKunOIA4OODh7djZbk48qqbowNFI= @@ -119,14 +113,6 @@ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzS github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= 
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -261,8 +247,6 @@ github.com/cavaliergopher/rpm v1.2.0/go.mod h1:R0q3vTqa7RUvPofAZYrnjJ63hh2vngjFf github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.13.2 h1:uhLimLX+jF9BTPPvoCUYh/mBeoONkjgaJ9w9fn0mRj4= @@ -275,8 +259,6 @@ github.com/cloudfoundry/noaa v2.1.0+incompatible/go.mod h1:5LmacnptvxzrTvMfL9+EJ github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4 h1:cWfya7mo/zbnwYVio6eWGsFJHqYw4/k/uhwIJ1eqRPI= 
github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4/go.mod h1:GS0pCHd7onIsewbw8Ue9qa9pZPv2V88cUZDttK6KzgI= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= -github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= @@ -418,11 +400,7 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= -github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.10.0/go.mod 
h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= @@ -579,8 +557,8 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= -github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -807,8 +785,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -971,12 +947,10 @@ go.opentelemetry.io/collector/pdata/pprofile v0.109.0 h1:5lobQKeHk8p4WC7KYbzL6Zq go.opentelemetry.io/collector/pdata/pprofile v0.109.0/go.mod h1:lXIifCdtR5ewO17JAYTUsclMqRp6h6dCowoXHhGyw8Y= go.opentelemetry.io/collector/receiver v0.109.0 h1:DTOM7xaDl7FUGQIjvjmWZn03JUE+aG4mJzWWfb7S8zw= go.opentelemetry.io/collector/receiver v0.109.0/go.mod h1:jeiCHaf3PE6aXoZfHF5Uexg7aztu+Vkn9LVw0YDKm6g= -go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= -go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 
h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= @@ -987,8 +961,6 @@ go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2 go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= -go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= @@ -1081,8 +1053,8 @@ golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1198,32 +1170,30 @@ golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= -google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= -google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= +google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk= +google.golang.org/api v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto 
v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= +google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= -google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= -google.golang.org/grpc/stats/opentelemetry 
v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= -google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 839733f37844da76a8a315b7678891e9703b9605 Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Wed, 16 Oct 2024 17:49:20 -0300 Subject: [PATCH 46/90] chore: update remaining CI pipelines to k8s v1.31.0 (#41081) * chore: update pipeline to k8s v1.31.0 * print error to debug test failures --- .buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml | 2 +- .buildkite/filebeat/filebeat-pipeline.yml | 2 +- .buildkite/metricbeat/pipeline.yml | 2 +- dev-tools/mage/gotest.go | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml b/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml index 3feff930cf90..dd74b6b47045 100644 --- a/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml +++ b/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml @@ -6,7 +6,7 @@ env: MODULE: "kubernetes" # Other deps - ASDF_KIND_VERSION: "0.20.0" + ASDF_KIND_VERSION: "0.24.0" IMAGE_BEATS_WITH_HOOKS_LATEST: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:latest" diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index d882cf1c9340..2b58709b2138 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ 
b/.buildkite/filebeat/filebeat-pipeline.yml @@ -24,7 +24,7 @@ env: ASDF_MAGE_VERSION: 1.15.0 # Integration Tests - K8S_VERSION: "v1.29.0" + K8S_VERSION: "v1.31.0" ASDF_KIND_VERSION: "0.20.0" # Unit tests diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index a23fc121d38e..1a9dab4a2f95 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -21,7 +21,7 @@ env: IMAGE_BEATS_WITH_HOOKS_LATEST: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:latest" # Integration Tests - K8S_VERSION: "v1.29.0" + K8S_VERSION: "v1.31.0" ASDF_KIND_VERSION: "0.20.0" # Module Tests diff --git a/dev-tools/mage/gotest.go b/dev-tools/mage/gotest.go index d8244403c9cc..ecc8f277b941 100644 --- a/dev-tools/mage/gotest.go +++ b/dev-tools/mage/gotest.go @@ -217,6 +217,7 @@ func goTestIntegrationForSingleModule(ctx context.Context, module string) error return nil }) if err != nil { + fmt.Printf("Error: failed to run integration tests for module %s:\n%v\n", fi.Name(), err) // err will already be report to stdout, collect failed module to report at end failedModules = append(failedModules, fi.Name()) } From a706c7912ef9201efe75b5890c7af14b52257028 Mon Sep 17 00:00:00 2001 From: Shaunak Kashyap Date: Wed, 16 Oct 2024 14:20:40 -0700 Subject: [PATCH 47/90] [Release] Modify mergify config for backporting to 8.16 branch (#41275) --- .mergify.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 9c0cd008e24b..f44b0439484b 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -355,6 +355,19 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.16 branch + conditions: + - merged + - label~=^(backport-v8.16.0|backport-8.16)$ + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.16" + labels: + - "backport" + title: "[{{ destination_branch 
}}](backport #{{ number }}) {{ title }}" - name: backport patches to 8.x branch conditions: - merged From 4e62fa54b4ff54f597b5ffaf663a8d2664fa5f63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20=C5=9Awi=C4=85tek?= Date: Thu, 17 Oct 2024 15:41:42 +0200 Subject: [PATCH 48/90] Fix Metricbeat k8s metadata sometimes not being present at startup (#41216) * Fix Metricbeat k8s metadata sometimes not being present at startup * Clone the whole metadata map when fetching --- CHANGELOG.next.asciidoc | 1 + .../module/kubernetes/util/kubernetes.go | 280 ++++++++---------- .../module/kubernetes/util/kubernetes_test.go | 150 +++------- 3 files changed, 177 insertions(+), 254 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 0ac51f5990c5..1886842df294 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -207,6 +207,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add AWS OwningAccount support for cross account monitoring {issue}40570[40570] {pull}40691[40691] - Use namespace for GetListMetrics when exists in AWS {pull}41022[41022] - Fix http server helper SSL config. {pull}39405[39405] +- Fix Kubernetes metadata sometimes not being present after startup {pull}41216[41216] *Osquerybeat* diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 581c9417516d..c17f5ba97183 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -20,6 +20,7 @@ package util import ( "errors" "fmt" + "maps" "strings" "sync" "time" @@ -39,6 +40,10 @@ import ( "github.com/elastic/beats/v7/metricbeat/mb" ) +// Resource metadata keys are composed of multiple parts - usually just the namespace and name. This string is the +// separator between the parts when treating the key as a single string. 
+const resourceMetadataKeySeparator = "/" + type kubernetesConfig struct { KubeConfig string `config:"kube_config"` KubeAdm bool `config:"use_kubeadm"` @@ -67,12 +72,13 @@ type Enricher interface { type enricher struct { sync.RWMutex - metadata map[string]mapstr.M + metadataCache map[string]mapstr.M index func(mapstr.M) string updateFunc func(kubernetes.Resource) map[string]mapstr.M deleteFunc func(kubernetes.Resource) []string metricsetName string resourceName string + watcher *metaWatcher isPod bool config *kubernetesConfig log *logp.Logger @@ -90,8 +96,7 @@ type metaWatcher struct { metricsetsUsing []string // list of metricsets using this shared watcher(e.g. pod, container, state_pod) - enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name. Each metricset has its own enricher - metadataObjects map[string]bool // representation of a set of ids(in the form of namespace_name-resource_name) of each object received by the watcher's handler functions + enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name. Each metricset has its own enricher nodeScope bool // whether this watcher should watch for resources in current node or in whole cluster restartWatcher kubernetes.Watcher // whether this watcher needs a restart. 
Only relevant in leader nodes due to metricsets with different nodescope(pod, state_pod) @@ -179,10 +184,10 @@ func getExtraWatchers(resourceName string, addResourceMetadata *metadata.AddReso // in order to be able to retrieve 2nd layer Owner metadata like in case of: // Deployment -> Replicaset -> Pod // CronJob -> job -> Pod - if addResourceMetadata != nil && addResourceMetadata.Deployment { + if addResourceMetadata.Deployment { extra = append(extra, ReplicaSetResource) } - if addResourceMetadata != nil && addResourceMetadata.CronJob { + if addResourceMetadata.CronJob { extra = append(extra, JobResource) } return extra @@ -320,47 +325,82 @@ func createWatcher( // Check if a watcher for the specific resource already exists. resourceMetaWatcher, ok := resourceWatchers.metaWatchersMap[resourceName] - // If it does not exist, create the resourceMetaWatcher. - if !ok { - // Check if we need to add namespace to the watcher's options. - if isNamespaced(resourceName) { - options.Namespace = namespace - } - watcher, err := kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) - if err != nil { - return false, err - } - resourceWatchers.metaWatchersMap[resourceName] = &metaWatcher{ - watcher: watcher, - started: false, // not started yet - metadataObjects: make(map[string]bool), - enrichers: make(map[string]*enricher), - metricsetsUsing: make([]string, 0), - restartWatcher: nil, - nodeScope: nodeScope, - } - return true, nil - } else if resourceMetaWatcher.nodeScope != nodeScope && resourceMetaWatcher.nodeScope { - // It might happen that the watcher already exists, but is only being used to monitor the resources - // of a single node(e.g. created by pod metricset). In that case, we need to check if we are trying to create a new watcher that will track - // the resources of whole cluster(e.g. in case of state_pod metricset). 
- // If it is the case, then we need to update the watcher by changing its watch options (removing options.Node) - // A running watcher cannot be updated directly. Instead, we must create a new one with the correct watch options. - // The new restartWatcher must be identical to the old watcher, including the same handler function, with the only difference being the watch options. - - if isNamespaced(resourceName) { - options.Namespace = namespace + // If the watcher exists, exit + if ok { + if resourceMetaWatcher.nodeScope != nodeScope && resourceMetaWatcher.nodeScope { + // It might happen that the watcher already exists, but is only being used to monitor the resources + // of a single node(e.g. created by pod metricset). In that case, we need to check if we are trying to create a new watcher that will track + // the resources of whole cluster(e.g. in case of state_pod metricset). + // If it is the case, then we need to update the watcher by changing its watch options (removing options.Node) + // A running watcher cannot be updated directly. Instead, we must create a new one with the correct watch options. + // The new restartWatcher must be identical to the old watcher, including the same handler function, with the only difference being the watch options. + + if isNamespaced(resourceName) { + options.Namespace = namespace + } + restartWatcher, err := kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) + if err != nil { + return false, err + } + // update the handler of the restartWatcher to match the current watcher's handler. 
+ restartWatcher.AddEventHandler(resourceMetaWatcher.watcher.GetEventHandler()) + resourceMetaWatcher.restartWatcher = restartWatcher + resourceMetaWatcher.nodeScope = nodeScope } - restartWatcher, err := kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) - if err != nil { - return false, err + return false, nil + } + // Watcher doesn't exist, create it + + // Check if we need to add namespace to the watcher's options. + if isNamespaced(resourceName) { + options.Namespace = namespace + } + watcher, err := kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) + if err != nil { + return false, err + } + + resourceMetaWatcher = &metaWatcher{ + watcher: watcher, + started: false, // not started yet + enrichers: make(map[string]*enricher), + metricsetsUsing: make([]string, 0), + restartWatcher: nil, + nodeScope: nodeScope, + } + resourceWatchers.metaWatchersMap[resourceName] = resourceMetaWatcher + + // Add event handlers to the watcher. The only action we need to do here is invalidate the enricher cache. + addEventHandlerToWatcher(resourceMetaWatcher, resourceWatchers) + + return true, nil +} + +// addEventHandlerToWatcher adds an event handler to the watcher that invalidates the cache of enrichers attached +// to the watcher. +func addEventHandlerToWatcher(metaWatcher *metaWatcher, resourceWatchers *Watchers) { + notifyFunc := func(obj interface{}) { + enrichers := make(map[string]*enricher, len(metaWatcher.enrichers)) + + resourceWatchers.lock.Lock() + maps.Copy(enrichers, metaWatcher.enrichers) + resourceWatchers.lock.Unlock() + + for _, enricher := range enrichers { + enricher.Lock() + ids := enricher.deleteFunc(obj.(kubernetes.Resource)) + // update this watcher events by removing all the metadata[id] + for _, id := range ids { + delete(enricher.metadataCache, id) + } + enricher.Unlock() } - // update the handler of the restartWatcher to match the current watcher's handler. 
- restartWatcher.AddEventHandler(resourceMetaWatcher.watcher.GetEventHandler()) - resourceMetaWatcher.restartWatcher = restartWatcher - resourceMetaWatcher.nodeScope = nodeScope } - return false, nil + metaWatcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) {}, // do nothing + UpdateFunc: notifyFunc, + DeleteFunc: notifyFunc, + }) } // addToMetricsetsUsing adds metricset identified by metricsetUsing to the list of resources using the shared watcher @@ -613,6 +653,7 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } + _, _ = specificMetaGen, generalMetaGen // necessary for earlier versions of golangci-lint // updateFunc to be used as the resource watchers add and update handler. // The handler function is executed when a watcher is triggered(i.e. new/updated resource). // It is responsible for generating the metadata for a detected resource by executing the metadata generators Generate method. @@ -922,7 +963,7 @@ func getString(m mapstr.M, key string) string { } func join(fields ...string) string { - return strings.Join(fields, ":") + return strings.Join(fields, resourceMetadataKeySeparator) } // buildMetadataEnricher builds and returns a metadata enricher for a given metricset. @@ -940,7 +981,7 @@ func buildMetadataEnricher( log *logp.Logger) *enricher { enricher := &enricher{ - metadata: map[string]mapstr.M{}, + metadataCache: map[string]mapstr.M{}, index: indexFunc, updateFunc: updateFunc, deleteFunc: deleteFunc, @@ -958,104 +999,7 @@ func buildMetadataEnricher( if resourceMetaWatcher != nil { // Append the new enricher to watcher's enrichers map. resourceMetaWatcher.enrichers[metricsetName] = enricher - - // Check if this shared watcher has already detected resources and collected their - // metadata for another enricher. - // In that case, for each resource, call the updateFunc of the current enricher to - // generate its metadata. 
This is needed in cases where the watcher has already been - // notified for new/updated resources while the enricher for current metricset has not - // built yet (example is pod, state_pod metricsets). - for key := range resourceMetaWatcher.metadataObjects { - obj, exists, err := resourceMetaWatcher.watcher.Store().GetByKey(key) - if err != nil { - log.Errorf("Error trying to get the object from the store: %s", err) - } else { - if exists { - newMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) - // add the new metadata to the watcher received metadata - for id, metadata := range newMetadataEvents { - enricher.metadata[id] = metadata - } - } - } - } - - // AddEventHandler sets add, update and delete methods of watcher. - // Those methods are triggered when an event is detected for a - // resource creation, update or deletion. - resourceMetaWatcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - resourceWatchers.lock.Lock() - defer resourceWatchers.lock.Unlock() - - // Add object(detected resource) to the list of metadata objects of this watcher, - // so it can be used by enrichers created after the event is triggered. - // The identifier of the object is in the form of namespace/name so that - // it can be easily fetched from watcher's store in previous step. - accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) - id := accessor.GetName() - namespace := accessor.GetNamespace() - if namespace != "" { - id = namespace + "/" + id - } - resourceMetaWatcher.metadataObjects[id] = true - // Execute the updateFunc of each enricher associated to thos watcher. 
- for _, enricher := range resourceMetaWatcher.enrichers { - enricher.Lock() - newMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) - // add the new metadata to the watcher received metadata - for id, metadata := range newMetadataEvents { - enricher.metadata[id] = metadata - } - enricher.Unlock() - } - }, - UpdateFunc: func(obj interface{}) { - resourceWatchers.lock.Lock() - defer resourceWatchers.lock.Unlock() - - // Add object to the list of metadata objects of this watcher - accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) - id := accessor.GetName() - namespace := accessor.GetNamespace() - if namespace != "" { - id = namespace + "/" + id - } - resourceMetaWatcher.metadataObjects[id] = true - - for _, enricher := range resourceMetaWatcher.enrichers { - enricher.Lock() - updatedMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) - for id, metadata := range updatedMetadataEvents { - enricher.metadata[id] = metadata - } - enricher.Unlock() - } - }, - DeleteFunc: func(obj interface{}) { - resourceWatchers.lock.Lock() - defer resourceWatchers.lock.Unlock() - - // Remove object from the list of metadata objects of this watcher - accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) - id := accessor.GetName() - namespace := accessor.GetNamespace() - if namespace != "" { - id = namespace + "/" + id - } - delete(resourceMetaWatcher.metadataObjects, id) - - for _, enricher := range resourceMetaWatcher.enrichers { - enricher.Lock() - ids := enricher.deleteFunc(obj.(kubernetes.Resource)) - // update this watcher events by removing all the metadata[id] - for _, id := range ids { - delete(enricher.metadata, id) - } - enricher.Unlock() - } - }, - }) + enricher.watcher = resourceMetaWatcher } return enricher @@ -1142,11 +1086,8 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { // This method is executed whenever a new event is created and about to be published. 
// The enricher's index method is used to retrieve the resource identifier from each event. func (e *enricher) Enrich(events []mapstr.M) { - e.RLock() - defer e.RUnlock() - for _, event := range events { - if meta := e.metadata[e.index(event)]; meta != nil { + if meta := e.getMetadata(event); meta != nil { k8s, err := meta.GetValue("kubernetes") if err != nil { continue @@ -1163,10 +1104,9 @@ func (e *enricher) Enrich(events []mapstr.M) { } // don't apply pod metadata to module level - k8sMeta = k8sMeta.Clone() delete(k8sMeta, "pod") } - ecsMeta := meta.Clone() + ecsMeta := meta err = ecsMeta.Delete("kubernetes") if err != nil { logp.Debug("kubernetes", "Failed to delete field '%s': %s", "kubernetes", err) @@ -1180,6 +1120,48 @@ func (e *enricher) Enrich(events []mapstr.M) { } } +// getMetadata returns metadata for the given event. If the metadata doesn't exist in the cache, we try to get it +// from the watcher store. +// The returned map is copy to be owned by the caller. +func (e *enricher) getMetadata(event mapstr.M) mapstr.M { + e.Lock() + defer e.Unlock() + metaKey := e.index(event) + eventMeta := e.metadataCache[metaKey] + if eventMeta == nil { + e.updateMetadataCacheFromWatcher(metaKey) + eventMeta = e.metadataCache[metaKey] + } + if eventMeta != nil { + eventMeta = eventMeta.Clone() + } + return eventMeta +} + +// updateMetadataCacheFromWatcher updates the metadata cache for the given key with data from the watcher. +func (e *enricher) updateMetadataCacheFromWatcher(key string) { + storeKey := getWatcherStoreKeyFromMetadataKey(key) + if res, exists, _ := e.watcher.watcher.Store().GetByKey(storeKey); exists { + eventMetaMap := e.updateFunc(res.(kubernetes.Resource)) + for k, v := range eventMetaMap { + e.metadataCache[k] = v + } + } +} + +// getWatcherStoreKeyFromMetadataKey returns a watcher store key for a given metadata cache key. 
These are identical +// for nearly all resources, and have the form `{namespace}/{name}`, with the exception of containers, where it's +// `{namespace}/{pod_name}/{container_name}`. In that case, we want the Pod key, so we drop the final part. +func getWatcherStoreKeyFromMetadataKey(metaKey string) string { + parts := strings.Split(metaKey, resourceMetadataKeySeparator) + if len(parts) <= 2 { // normal K8s resource + return metaKey + } + + // container, we need to remove the final part to get the Pod key + return strings.Join(parts[:2], resourceMetadataKeySeparator) +} + func CreateEvent(event mapstr.M, namespace string) (mb.Event, error) { var moduleFieldsMapStr mapstr.M moduleFields, ok := event[mb.ModuleDataKey] diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 61da906372f4..703035d5c38d 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -469,13 +471,14 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers := NewWatchers() resourceWatchers.lock.Lock() - resourceWatchers.metaWatchersMap[PodResource] = &metaWatcher{ - watcher: &mockWatcher{}, + watcher := &metaWatcher{ + watcher: newMockWatcher(), started: false, metricsetsUsing: []string{"pod"}, - metadataObjects: make(map[string]bool), enrichers: make(map[string]*enricher), } + resourceWatchers.metaWatchersMap[PodResource] = watcher + addEventHandlerToWatcher(watcher, resourceWatchers) resourceWatchers.lock.Unlock() funcs := mockFuncs{} @@ -489,8 +492,10 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { Namespace: "default", }, } - id := "default/enrich" - metadataObjects := map[string]bool{id: true} + events := []mapstr.M{ + {"name": 
"unknown"}, + {"name": "enrich"}, + } config := &kubernetesConfig{ Namespace: "test-ns", @@ -509,30 +514,22 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() wData := resourceWatchers.metaWatchersMap[PodResource] - mockW := wData.watcher.(*mockWatcher) + mockW, ok := wData.watcher.(*mockWatcher) + require.True(t, ok) require.NotNil(t, mockW.handler) resourceWatchers.lock.Unlock() enricher.Start(resourceWatchers) resourceWatchers.lock.Lock() - watcher := resourceWatchers.metaWatchersMap[PodResource] require.True(t, watcher.started) - mockW = watcher.watcher.(*mockWatcher) resourceWatchers.lock.Unlock() mockW.handler.OnAdd(resource) - - resourceWatchers.lock.Lock() - require.Equal(t, metadataObjects, watcher.metadataObjects) - resourceWatchers.lock.Unlock() - - require.Equal(t, resource, funcs.updated) + err := mockW.Store().Add(resource) + require.NoError(t, err) // Test enricher - events := []mapstr.M{ - {"name": "unknown"}, - {"name": "enrich"}, - } + enricher.Enrich(events) require.Equal(t, []mapstr.M{ @@ -544,6 +541,8 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { }, }, events) + require.Equal(t, resource, funcs.updated) + // Enrich a pod (metadata goes in root level) events = []mapstr.M{ {"name": "unknown"}, @@ -565,14 +564,13 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { // Emit delete event resourceWatchers.lock.Lock() wData = resourceWatchers.metaWatchersMap[PodResource] - mockW = wData.watcher.(*mockWatcher) + mockW, ok = wData.watcher.(*mockWatcher) + require.True(t, ok) resourceWatchers.lock.Unlock() mockW.handler.OnDelete(resource) - - resourceWatchers.lock.Lock() - require.Equal(t, map[string]bool{}, watcher.metadataObjects) - resourceWatchers.lock.Unlock() + err = mockW.Store().Delete(resource) + require.NoError(t, err) require.Equal(t, resource, funcs.deleted) @@ -594,87 +592,16 @@ func TestBuildMetadataEnricher_EventHandler(t 
*testing.T) { resourceWatchers.lock.Unlock() } -// Test if we can add metadata from past events to an enricher that is associated -// with a resource that had already triggered the handler functions -func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { - log := logp.NewLogger(selector) - - resourceWatchers := NewWatchers() - - resourceWatchers.lock.Lock() - resourceWatchers.metaWatchersMap[PodResource] = &metaWatcher{ - watcher: &mockWatcher{}, - started: false, - metricsetsUsing: []string{"pod", "state_pod"}, - metadataObjects: make(map[string]bool), - enrichers: make(map[string]*enricher), - } - resourceWatchers.lock.Unlock() - - funcs := mockFuncs{} - resource1 := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID("mockuid"), - Name: "enrich", - Labels: map[string]string{ - "label": "value", - }, - Namespace: "default", - }, - } - id1 := "default/enrich" - resource2 := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID("mockuid2"), - Name: "enrich-2", - Labels: map[string]string{ - "label": "value", - }, - Namespace: "default-2", - }, - } - id2 := "default-2/enrich-2" - - config := &kubernetesConfig{ - Namespace: "test-ns", - SyncPeriod: time.Minute, - Node: "test-node", - AddResourceMetadata: &metadata.AddResourceMetadataConfig{ - CronJob: false, - Deployment: false, - }, - } - - enricher := buildMetadataEnricher("pod", PodResource, resourceWatchers, config, - funcs.update, funcs.delete, funcs.index, log) - enricher.Start(resourceWatchers) - - resourceWatchers.lock.Lock() - - watcher := resourceWatchers.metaWatchersMap[PodResource] - mockW := watcher.watcher.(*mockWatcher) - resourceWatchers.lock.Unlock() - - mockW.handler.OnAdd(resource1) - - resourceWatchers.lock.Lock() - metadataObjects := map[string]bool{id1: true} - require.Equal(t, metadataObjects, watcher.metadataObjects) - resourceWatchers.lock.Unlock() - - mockW.handler.OnUpdate(resource2) - - resourceWatchers.lock.Lock() - metadataObjects[id2] = true - 
require.Equal(t, metadataObjects, watcher.metadataObjects) - resourceWatchers.lock.Unlock() - - mockW.handler.OnDelete(resource1) - - resourceWatchers.lock.Lock() - delete(metadataObjects, id1) - require.Equal(t, metadataObjects, watcher.metadataObjects) - resourceWatchers.lock.Unlock() +func TestGetWatcherStoreKeyFromMetadataKey(t *testing.T) { + t.Run("global resource", func(t *testing.T) { + assert.Equal(t, "name", getWatcherStoreKeyFromMetadataKey("name")) + }) + t.Run("namespaced resource", func(t *testing.T) { + assert.Equal(t, "namespace/name", getWatcherStoreKeyFromMetadataKey("namespace/name")) + }) + t.Run("container", func(t *testing.T) { + assert.Equal(t, "namespace/pod", getWatcherStoreKeyFromMetadataKey("namespace/pod/container")) + }) } type mockFuncs struct { @@ -716,6 +643,19 @@ func (f *mockFuncs) index(m mapstr.M) string { type mockWatcher struct { handler kubernetes.ResourceEventHandler + store cache.Store +} + +func newMockWatcher() *mockWatcher { + return &mockWatcher{ + store: cache.NewStore(func(obj interface{}) (string, error) { + objName, err := cache.ObjectToName(obj) + if err != nil { + return "", err + } + return objName.Name, nil + }), + } } func (m *mockWatcher) GetEventHandler() kubernetes.ResourceEventHandler { @@ -735,7 +675,7 @@ func (m *mockWatcher) AddEventHandler(r kubernetes.ResourceEventHandler) { } func (m *mockWatcher) Store() cache.Store { - return nil + return m.store } func (m *mockWatcher) Client() k8s.Interface { From e3476939420eb24d4ccc1741f38aa32fa11b30c2 Mon Sep 17 00:00:00 2001 From: "Alex K." 
<8418476+fearful-symmetry@users.noreply.github.com> Date: Thu, 17 Oct 2024 07:19:46 -0700 Subject: [PATCH 49/90] use 1.80 (#41271) --- x-pack/packetbeat/magefile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/packetbeat/magefile.go b/x-pack/packetbeat/magefile.go index 88a2857e6774..543498164683 100644 --- a/x-pack/packetbeat/magefile.go +++ b/x-pack/packetbeat/magefile.go @@ -36,7 +36,7 @@ import ( // the packetbeat executable. It is used to specify which npcap builder crossbuild // image to use and the installer to obtain from the cloud store for testing. const ( - NpcapVersion = "1.79" + NpcapVersion = "1.80" installer = "npcap-" + NpcapVersion + "-oem.exe" ) From 02664106e2d21861faf8bcbf3d112c3fde51bcc4 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 20:17:11 +0200 Subject: [PATCH 50/90] [main](backport #41261) docs: Prepare Changelog for 8.15.3 (#41292) * docs: Prepare Changelog for 8.15.3 (#41261) * docs: Close changelog for 8.15.3 * Update CHANGELOG.asciidoc * Update CHANGELOG.asciidoc * Apply suggestions from code review Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Craig MacKenzie --------- Co-authored-by: elasticmachine Co-authored-by: Pierre HILBERT Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> Co-authored-by: Craig MacKenzie (cherry picked from commit e29591eeb3e536d8e8abf95a30759362b9f4874a) # Conflicts: # libbeat/docs/release.asciidoc * Update release.asciidoc --------- Co-authored-by: elastic-vault-github-plugin-prod[bot] <150874479+elastic-vault-github-plugin-prod[bot]@users.noreply.github.com> Co-authored-by: Pierre HILBERT --- CHANGELOG.asciidoc | 41 +++++++++++++++++++++++++++++++++++ CHANGELOG.next.asciidoc | 20 +++++------------ libbeat/docs/release.asciidoc | 2 ++ 3 files changed, 49 insertions(+), 14 deletions(-) diff --git 
a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 4bf184b3a503..ab562abbb54a 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,47 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.15.3]] +=== Beats version 8.15.3 +https://github.com/elastic/beats/compare/v8.15.2\...v8.15.3[View commits] + +==== Breaking changes + +*Filebeat* + +- Change `log.file.path` field in awscloudwatch input to nested object. {pull}41099[41099] + +==== Bugfixes + +*Affecting all Beats* + +- Allow port number 0 in the community ID flowhash processor. {pull}40259[40259] +- The journald input now restarts if there is an error/crash. {issue}32782[32782] {pull}40558[40558] + +*Filebeat* + +- Fix replace processor handling of zero string replacement validation. {pull}40751[40751] +- Add backup and delete for AWS S3 polling mode feature back. {pull}41071[41071] + +*Metricbeat* + +- Use namespace for GetListMetrics when it exists in AWS. {pull}41022[41022] + +*Packetbeat* + +- Fix upload of bundled ingest pipelines on Windows. {pull}41110[41110] + +==== Added + +*Affecting all Beats* + +- Update Go version to 1.22.8. {pull}41139[41139] + +*Metricbeat* + +- Restore `docker.network.in.*` and `docker.network.out.*` fields in docker module. 
{pull}40968[40968] + + [[release-notes-8.15.2]] === Beats version 8.15.2 https://github.com/elastic/beats/compare/v8.15.0\...v8.15.2[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1886842df294..09fa93e2dbc7 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -106,19 +106,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Support Elastic Agent control protocol chunking support {pull}37343[37343] - Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}[37816][37816] - Set timeout of 1 minute for FQDN requests {pull}37756[37756] -- Fix the paths in the .cmd script added to the path by the Windows MSI to point to the new C:\Program Files installation location. https://github.com/elastic/elastic-stack-installers/pull/238 -- Change cache processor documentation from `write_period` to `write_interval`. {pull}38561[38561] -- Fix cache processor expiries heap cleanup on partial file writes. {pull}38561[38561] -- Fix cache processor expiries infinite growth when large a large TTL is used and recurring keys are cached. {pull}38561[38561] -- Fix parsing of RFC 3164 process IDs in syslog processor. {issue}38947[38947] {pull}38982[38982] -- Rename the field "apache2.module.error" to "apache.module.error" in Apache error visualization. {issue}39480[39480] {pull}39481[39481] -- Validate config of the `replace` processor {pull}40047[40047] -- Allow port number 0 in the community ID flowhash processor {pull}40259[40259] -- Fix handling of escaped brackets in syslog structured data. {issue}40445[40445] {pull}40446[40446] -- Aborts all active connections for Elasticsearch output. {pull}40572[40572] -- Closes beat Publisher on beat stop and by the Agent manager. {pull}40572[40572] -- The journald input now restarts if there is an error/crash {issue}32782[32782] {pull}40558[40558] -- Update Go version to 1.22.8. 
{pull}41139[41139] *Auditbeat* @@ -214,7 +201,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Packetbeat* -- Fix upload of bundled ingest pipelines on Windows. {pull}41110[41110] *Winlogbeat* @@ -430,3 +416,9 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] + + + + + + diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index ae01beb5c4ff..94cece60fb86 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,8 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. +* <> +* <> * <> * <> * <> From 0c2f9e7988fd81f286ef53c3df62a6147293aced Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Fri, 18 Oct 2024 18:51:09 +1030 Subject: [PATCH 51/90] x-pack/filebeat/input/streaming: log websocket bad handshake details (#41300) --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/streaming/websocket.go | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 09fa93e2dbc7..2f1a5377ad9c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -163,6 +163,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix a bug in Salesforce input to only handle responses with 200 status code {pull}41015[41015] - Fixed failed job handling and removed false-positive error logs in the GCS input. {pull}41142[41142] - Bump github.com/elastic/go-sfdc dependency used by x-pack/filebeat/input/salesforce. 
{pull}41192[41192] +- Log bad handshake details when websocket connection fails {pull}41300[41300] *Heartbeat* diff --git a/x-pack/filebeat/input/streaming/websocket.go b/x-pack/filebeat/input/streaming/websocket.go index ce0f086e558b..1deaf8b07fa5 100644 --- a/x-pack/filebeat/input/streaming/websocket.go +++ b/x-pack/filebeat/input/streaming/websocket.go @@ -228,7 +228,11 @@ func connectWebSocket(ctx context.Context, cfg config, url string, log *logp.Log if err == nil { return conn, response, nil } - log.Debugw("attempt %d: webSocket connection failed. retrying...\n", attempt) + if err == websocket.ErrBadHandshake { + log.Errorf("attempt %d: webSocket connection failed with bad handshake (status %d) retrying...\n", attempt, response.StatusCode) + continue + } + log.Debugf("attempt %d: webSocket connection failed. retrying...\n", attempt) waitTime := calculateWaitTime(retryConfig.WaitMin, retryConfig.WaitMax, attempt) time.Sleep(waitTime) } From 96d9581d76a5eaaf8fe4e2b852769ddfbab148c7 Mon Sep 17 00:00:00 2001 From: ShourieG <105607378+ShourieG@users.noreply.github.com> Date: Fri, 18 Oct 2024 14:47:39 +0530 Subject: [PATCH 52/90] [filebeat][Azure Blob Storage] - Improved documentation (#41252) * improved documentation --- CHANGELOG.next.asciidoc | 1 + .../inputs/input-azure-blob-storage.asciidoc | 20 +++++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2f1a5377ad9c..6b5659878923 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -318,6 +318,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Jounrald input now supports filtering by facilities {pull}41061[41061] - System module now supports reading from jounrald. {pull}41061[41061] - Add support to include AWS cloudwatch linked accounts when using log_group_name_prefix to define log group names. {pull}41206[41206] +- Improved Azure Blob Storage input documentation. 
{pull}41252[41252] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-azure-blob-storage.asciidoc b/x-pack/filebeat/docs/inputs/input-azure-blob-storage.asciidoc index 0ee02cf91d78..8c04d9f2a00a 100644 --- a/x-pack/filebeat/docs/inputs/input-azure-blob-storage.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-azure-blob-storage.asciidoc @@ -26,8 +26,8 @@ even though it can get expensive with dealing with a very large number of files. describing said error. [id="supported-types"] -NOTE: NOTE: Currently only `JSON` and `NDJSON` are supported blob/file formats. Blobs/files may be also be gzip compressed. -As for authentication types, we currently have support for `shared access keys` and `connection strings`. +NOTE: `JSON`, `NDJSON` and `CSV` are supported blob/file formats. Blobs/files may be also be gzip compressed. +`shared access keys`, `connection strings` and `Microsoft Entra ID RBAC` authentication types are supported. [id="basic-config"] *A sample configuration with detailed explanation for each field is given below :-* @@ -224,10 +224,14 @@ This is a specific subfield of a container. It specifies the container name. [float] ==== `max_workers` -This attribute defines the maximum number of workers (go routines / lightweight threads) are allocated in the worker pool (thread pool) for processing jobs -which read contents of file. More number of workers equals a greater amount of concurrency achieved. There is an upper cap of `5000` workers per container that -can be defined due to internal sdk constraints. This attribute can be specified both at the root level of the configuration as well at the container level. -The container level values will always take priority and override the root level values if both are specified. +This attribute defines the maximum number of workers allocated to the worker pool for processing jobs which read file contents. It can be specified both at the root level of the configuration, and at the container level. 
Container level values override root level values if both are specified. Larger number of workers do not necessarily improve throughput, and this should be carefully tuned based on the number of files, the size of the files being processed and resources available. Increasing `max_workers` to very high values may cause resource utilization problems and may lead to bottlenecks in processing. Usually a maximum of `2000` workers is recommended. A very low `max_worker` count will drastically increase the number of network calls required to fetch the blobs, which may cause a bottleneck in processing. + +The batch size for workload distribution is calculated by the input to ensure that there is an even workload across all workers. This means that for a given `max_workers` parameter value, the input will calculate the optimal batch size for that setting. The `max_workers` value should be configured based on factors such as the total number of files to be processed, available system resources and network bandwidth. + +Example: + +- Setting `max_workers=3` would result in each request fetching `3 blobs` (batch size = 3), which are then distributed among `3 workers`. +- Setting `max_workers=100` would fetch `100 blobs` (batch size = 100) per request, distributed among `100 workers`. [id="attrib-poll"] [float] @@ -325,6 +329,8 @@ filebeat.inputs: - regex: '/Security-Logs/' ---- +The `file_selectors` operation is performed within the agent locally. The agent will download all the files and then filter them based on the `file_selectors`. This can cause a bottleneck in processing if the number of files are very high. It is recommended to use this attribute only when the number of files are limited or ample resources are available. 
+ [id="attrib-expand_event_list_from_field"] [float] ==== `expand_event_list_from_field` @@ -385,6 +391,8 @@ filebeat.inputs: timestamp_epoch: 1627233600 ---- +The Azure Blob Storage APIs don't provide a direct way to filter files based on timestamp, so the input will download all the files and then filter them based on the timestamp. This can cause a bottleneck in processing if the number of files are very high. It is recommended to use this attribute only when the number of files are limited or ample resources are available. + [id="container-overrides"] *The sample configs below will explain the container level overriding of attributes a bit further :-* From b493c7fcd6bb15cde707f5b125220d0e87918225 Mon Sep 17 00:00:00 2001 From: Geoff Rowland <70981735+rowlandgeoff@users.noreply.github.com> Date: Fri, 18 Oct 2024 09:28:05 -0400 Subject: [PATCH 53/90] CODEOWNERS refactor (#41234) --- .github/CODEOWNERS | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index f04bf64fae47..49b9f58a4b63 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -12,14 +12,18 @@ # https://github.community/t/codeowners-file-with-a-not-file-type-condition/1423/9 CHANGELOG* +# Top-level files ownership +/catalog-info.yaml @elastic/ingest-eng-prod # The tech leads of the teams working in Beats share ownership of the Go module dependencies and related files. -/.github/CODEOWNERS @elastic/beats-tech-leads /go.mod @elastic/beats-tech-leads /go.sum @elastic/beats-tech-leads /NOTICE.txt @elastic/beats-tech-leads +# Sub-directories/files ownership. Remember that order matters; the last matching pattern takes the most precedence. 
+/.buildkite @elastic/ingest-eng-prod /.ci/ @elastic/elastic-agent-data-plane @elastic/ingest-eng-prod /.github/ @elastic/ingest-eng-prod +/.github/CODEOWNERS @elastic/beats-tech-leads /auditbeat/ @elastic/sec-linux-platform /deploy/ @elastic/elastic-agent-data-plane /deploy/kubernetes @elastic/elastic-agent-data-plane @elastic/obs-cloudnative-monitoring @@ -55,6 +59,8 @@ CHANGELOG* /libbeat/ @elastic/elastic-agent-data-plane /libbeat/docs/processors-list.asciidoc @elastic/ingest-docs /libbeat/management @elastic/elastic-agent-control-plane +/libbeat/processors/add_cloud_metadata @elastic/obs-cloud-monitoring +/libbeat/processors/add_kubernetes_metadata @elastic/obs-cloudnative-monitoring /libbeat/processors/cache/ @elastic/security-service-integrations /libbeat/processors/community_id/ @elastic/sec-deployment-and-devices /libbeat/processors/decode_xml/ @elastic/security-service-integrations @@ -63,9 +69,8 @@ CHANGELOG* /libbeat/processors/registered_domain/ @elastic/sec-deployment-and-devices /libbeat/processors/syslog/ @elastic/sec-deployment-and-devices /libbeat/processors/translate_sid/ @elastic/sec-windows-platform -/libbeat/processors/add_cloud_metadata @elastic/obs-cloud-monitoring -/libbeat/processors/add_kubernetes_metadata @elastic/obs-cloudnative-monitoring /libbeat/reader/syslog/ @elastic/sec-deployment-and-devices +/libbeat/scripts @elastic/ingest-eng-prod /licenses/ @elastic/elastic-agent-data-plane /metricbeat/ @elastic/elastic-agent-data-plane /metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
@@ -99,6 +104,7 @@ CHANGELOG* /metricbeat/module/system/ @elastic/elastic-agent-data-plane /metricbeat/module/vsphere @elastic/obs-infraobs-integrations /metricbeat/module/zookeeper @elastic/obs-infraobs-integrations +/metricbeat/tests @elastic/ingest-eng-prod /packetbeat/ @elastic/sec-linux-platform /script/ @elastic/elastic-agent-data-plane /testing/ @elastic/elastic-agent-data-plane @@ -227,9 +233,3 @@ CHANGELOG* /x-pack/osquerybeat/ @elastic/sec-deployment-and-devices /x-pack/packetbeat/ @elastic/sec-linux-platform /x-pack/winlogbeat/ @elastic/sec-windows-platform - -# Ownership of CI or related files by the Ingest Eng Prod team -/.buildkite @elastic/ingest-eng-prod -/catalog-info.yaml @elastic/ingest-eng-prod -/libbeat/scripts @elastic/ingest-eng-prod -/metricbeat/tests @elastic/ingest-eng-prod From 6766cfaf65d12a546d5615007bb497adbea83f9e Mon Sep 17 00:00:00 2001 From: Christiano Haesbaert Date: Fri, 18 Oct 2024 16:31:51 +0200 Subject: [PATCH 54/90] [auditbeat] Allow memfd_create(2) in seccomp for add_session_metadata@ebpf (#41297) Quark was falling back into kprobe since ebpf would fail with EPERM at memfd_create(2). ``` $ strace -f auditbeat .... [pid 2917] memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC) = -1 EPERM (Operation not permitted) ``` With this my test case where kprobe is disabled now uses ebpf when I select backend "auto", before it was falling back to procfsprovider. 
--- x-pack/auditbeat/seccomp_linux.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/x-pack/auditbeat/seccomp_linux.go b/x-pack/auditbeat/seccomp_linux.go index 709d973465d6..5dd05618d31c 100644 --- a/x-pack/auditbeat/seccomp_linux.go +++ b/x-pack/auditbeat/seccomp_linux.go @@ -35,5 +35,13 @@ func init() { ); err != nil { panic(err) } + + // The sessionmd processor kerneltracingprovider needs + // memfd_create to operate via EBPF + if err := seccomp.ModifyDefaultPolicy(seccomp.AddSyscall, + "memfd_create", + ); err != nil { + panic(err) + } } } From 9e6a942c01dd092e0be5b613c195213df75d6ad7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20=C5=9Awi=C4=85tek?= Date: Fri, 18 Oct 2024 17:20:01 +0200 Subject: [PATCH 55/90] Only watch metadata for ReplicaSets in metricbeat k8s module (#41289) --- CHANGELOG.next.asciidoc | 1 + .../module/kubernetes/util/kubernetes.go | 187 ++++++++++++------ .../module/kubernetes/util/kubernetes_test.go | 167 +++++++++++++++- 3 files changed, 284 insertions(+), 71 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6b5659878923..8eebaa311378 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -365,6 +365,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Added Cisco Meraki module {pull}40836[40836] - Added Palo Alto Networks module {pull}40686[40686] - Restore docker.network.in.* and docker.network.out.* fields in docker module {pull}40968[40968] +- Only watch metadata for ReplicaSets in metricbeat k8s module {pull}41289[41289] *Metricbeat* diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index c17f5ba97183..5844c555c888 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -25,7 +25,12 @@ import ( "sync" "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/runtime/schema" + k8sclient 
"k8s.io/client-go/kubernetes" + k8sclientmeta "k8s.io/client-go/metadata" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" @@ -304,6 +309,7 @@ func createWatcher( resource kubernetes.Resource, options kubernetes.WatchOptions, client k8sclient.Interface, + metadataClient k8sclientmeta.Interface, resourceWatchers *Watchers, namespace string, extraWatcher bool) (bool, error) { @@ -355,9 +361,27 @@ func createWatcher( if isNamespaced(resourceName) { options.Namespace = namespace } - watcher, err := kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) + var ( + watcher kubernetes.Watcher + err error + ) + switch resource.(type) { + // use a metadata informer for ReplicaSets, as we only need their metadata + case *kubernetes.ReplicaSet: + watcher, err = kubernetes.NewNamedMetadataWatcher( + "resource_metadata_enricher_rs", + client, + metadataClient, + schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}, + options, + nil, + transformReplicaSetMetadata, + ) + default: + watcher, err = kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) + } if err != nil { - return false, err + return false, fmt.Errorf("error creating watcher for %T: %w", resource, err) } resourceMetaWatcher = &metaWatcher{ @@ -450,6 +474,7 @@ func removeFromMetricsetsUsing(resourceName string, notUsingName string, resourc // createAllWatchers creates all the watchers required by a metricset func createAllWatchers( client k8sclient.Interface, + metadataClient k8sclientmeta.Interface, metricsetName string, resourceName string, nodeScope bool, @@ -469,7 +494,7 @@ func createAllWatchers( // Create the main watcher for the given resource. // For example pod metricset's main watcher will be pod watcher. // If it fails, we return an error, so we can stop the extra watchers from creating. 
- created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace, false) + created, err := createWatcher(resourceName, res, *options, client, metadataClient, resourceWatchers, config.Namespace, false) if err != nil { return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, metricsetName, err) } else if created { @@ -484,7 +509,7 @@ func createAllWatchers( for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { - created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers, config.Namespace, true) + created, err = createWatcher(extra, extraRes, *options, client, metadataClient, resourceWatchers, config.Namespace, true) if err != nil { log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, metricsetName, err) } else { @@ -620,11 +645,16 @@ func NewResourceMetadataEnricher( log.Errorf("Error creating Kubernetes client: %s", err) return &nilEnricher{} } + metadataClient, err := kubernetes.GetKubernetesMetadataClient(config.KubeConfig, config.KubeClientOptions) + if err != nil { + log.Errorf("Error creating Kubernetes client: %s", err) + return &nilEnricher{} + } metricsetName := base.Name() resourceName := getResourceName(metricsetName) // Create all watchers needed for this metricset - err = createAllWatchers(client, metricsetName, resourceName, nodeScope, config, log, resourceWatchers) + err = createAllWatchers(client, metadataClient, metricsetName, resourceName, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -659,61 +689,7 @@ func NewResourceMetadataEnricher( // It is responsible for generating the metadata for a detected resource by executing the metadata generators Generate method. // It is a common handler for all resource watchers. The kind of resource(e.g. pod or deployment) is checked inside the function. 
// It returns a map of a resource identifier(i.e. namespace-resource_name) as key and the metadata as value. - updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { - accessor, _ := meta.Accessor(r) - id := accessor.GetName() - namespace := accessor.GetNamespace() - if namespace != "" { - id = join(namespace, id) - } - - switch r := r.(type) { - case *kubernetes.Pod: - return map[string]mapstr.M{id: specificMetaGen.Generate(r)} - - case *kubernetes.Node: - nodeName := r.GetObjectMeta().GetName() - metrics := NewNodeMetrics() - if cpu, ok := r.Status.Capacity["cpu"]; ok { - if q, err := resource.ParseQuantity(cpu.String()); err == nil { - metrics.CoresAllocatable = NewFloat64Metric(float64(q.MilliValue()) / 1000) - } - } - if memory, ok := r.Status.Capacity["memory"]; ok { - if q, err := resource.ParseQuantity(memory.String()); err == nil { - metrics.MemoryAllocatable = NewFloat64Metric(float64(q.Value())) - } - } - nodeStore, _ := metricsRepo.AddNodeStore(nodeName) - nodeStore.SetNodeMetrics(metrics) - - return map[string]mapstr.M{id: generalMetaGen.Generate(NodeResource, r)} - case *kubernetes.Deployment: - return map[string]mapstr.M{id: generalMetaGen.Generate(DeploymentResource, r)} - case *kubernetes.Job: - return map[string]mapstr.M{id: generalMetaGen.Generate(JobResource, r)} - case *kubernetes.CronJob: - return map[string]mapstr.M{id: generalMetaGen.Generate(CronJobResource, r)} - case *kubernetes.Service: - return map[string]mapstr.M{id: specificMetaGen.Generate(r)} - case *kubernetes.StatefulSet: - return map[string]mapstr.M{id: generalMetaGen.Generate(StatefulSetResource, r)} - case *kubernetes.Namespace: - return map[string]mapstr.M{id: generalMetaGen.Generate(NamespaceResource, r)} - case *kubernetes.ReplicaSet: - return map[string]mapstr.M{id: generalMetaGen.Generate(ReplicaSetResource, r)} - case *kubernetes.DaemonSet: - return map[string]mapstr.M{id: generalMetaGen.Generate(DaemonSetResource, r)} - case *kubernetes.PersistentVolume: - 
return map[string]mapstr.M{id: generalMetaGen.Generate(PersistentVolumeResource, r)} - case *kubernetes.PersistentVolumeClaim: - return map[string]mapstr.M{id: generalMetaGen.Generate(PersistentVolumeClaimResource, r)} - case *kubernetes.StorageClass: - return map[string]mapstr.M{id: generalMetaGen.Generate(StorageClassResource, r)} - default: - return map[string]mapstr.M{id: generalMetaGen.Generate(r.GetObjectKind().GroupVersionKind().Kind, r)} - } - } + updateFunc := getEventMetadataFunc(log, generalMetaGen, specificMetaGen, metricsRepo) // deleteFunc to be used as the resource watcher's delete handler. // The deleteFunc is executed when a watcher is triggered for a resource deletion(e.g. pod deleted). @@ -797,10 +773,15 @@ func NewContainerMetadataEnricher( log.Errorf("Error creating Kubernetes client: %s", err) return &nilEnricher{} } + metadataClient, err := kubernetes.GetKubernetesMetadataClient(config.KubeConfig, config.KubeClientOptions) + if err != nil { + log.Errorf("Error creating Kubernetes client: %s", err) + return &nilEnricher{} + } metricsetName := base.Name() - err = createAllWatchers(client, metricsetName, PodResource, nodeScope, config, log, resourceWatchers) + err = createAllWatchers(client, metadataClient, metricsetName, PodResource, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -1231,3 +1212,87 @@ func AddClusterECSMeta(base mb.BaseMetricSet) mapstr.M { } return ecsClusterMeta } + +// transformReplicaSetMetadata ensures that the PartialObjectMetadata resources we get from a metadata watcher +// can be correctly interpreted by the update function returned by getEventMetadataFunc. +// This really just involves adding missing type information. 
+func transformReplicaSetMetadata(obj interface{}) (interface{}, error) { + old, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("obj of type %T neither a ReplicaSet nor a PartialObjectMetadata", obj) + } + old.TypeMeta = metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "ReplicaSet", + } + return old, nil +} + +// getEventMetadataFunc returns a function that takes a kubernetes Resource as an argument and returns metadata +// that can directly be used for event enrichment. +// This function is intended to be used as the resource watchers add and update handler. +func getEventMetadataFunc( + logger *logp.Logger, + generalMetaGen *metadata.Resource, + specificMetaGen metadata.MetaGen, + metricsRepo *MetricsRepo, +) func(r kubernetes.Resource) map[string]mapstr.M { + return func(r kubernetes.Resource) map[string]mapstr.M { + accessor, accErr := meta.Accessor(r) + if accErr != nil { + logger.Errorf("Error creating accessor: %s", accErr) + } + id := accessor.GetName() + namespace := accessor.GetNamespace() + if namespace != "" { + id = join(namespace, id) + } + + switch r := r.(type) { + case *kubernetes.Pod: + return map[string]mapstr.M{id: specificMetaGen.Generate(r)} + + case *kubernetes.Node: + nodeName := r.GetObjectMeta().GetName() + metrics := NewNodeMetrics() + if cpu, ok := r.Status.Capacity["cpu"]; ok { + if q, err := resource.ParseQuantity(cpu.String()); err == nil { + metrics.CoresAllocatable = NewFloat64Metric(float64(q.MilliValue()) / 1000) + } + } + if memory, ok := r.Status.Capacity["memory"]; ok { + if q, err := resource.ParseQuantity(memory.String()); err == nil { + metrics.MemoryAllocatable = NewFloat64Metric(float64(q.Value())) + } + } + nodeStore, _ := metricsRepo.AddNodeStore(nodeName) + nodeStore.SetNodeMetrics(metrics) + + return map[string]mapstr.M{id: generalMetaGen.Generate(NodeResource, r)} + case *kubernetes.Deployment: + return map[string]mapstr.M{id: generalMetaGen.Generate(DeploymentResource, r)} + case 
*kubernetes.Job: + return map[string]mapstr.M{id: generalMetaGen.Generate(JobResource, r)} + case *kubernetes.CronJob: + return map[string]mapstr.M{id: generalMetaGen.Generate(CronJobResource, r)} + case *kubernetes.Service: + return map[string]mapstr.M{id: specificMetaGen.Generate(r)} + case *kubernetes.StatefulSet: + return map[string]mapstr.M{id: generalMetaGen.Generate(StatefulSetResource, r)} + case *kubernetes.Namespace: + return map[string]mapstr.M{id: generalMetaGen.Generate(NamespaceResource, r)} + case *kubernetes.ReplicaSet: + return map[string]mapstr.M{id: generalMetaGen.Generate(ReplicaSetResource, r)} + case *kubernetes.DaemonSet: + return map[string]mapstr.M{id: generalMetaGen.Generate(DaemonSetResource, r)} + case *kubernetes.PersistentVolume: + return map[string]mapstr.M{id: generalMetaGen.Generate(PersistentVolumeResource, r)} + case *kubernetes.PersistentVolumeClaim: + return map[string]mapstr.M{id: generalMetaGen.Generate(PersistentVolumeClaimResource, r)} + case *kubernetes.StorageClass: + return map[string]mapstr.M{id: generalMetaGen.Generate(StorageClassResource, r)} + default: + return map[string]mapstr.M{id: generalMetaGen.Generate(r.GetObjectKind().GroupVersionKind().Kind, r)} + } + } +} diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 703035d5c38d..ec2309b08bf3 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "github.com/elastic/beats/v7/metricbeat/mb" + "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" @@ -40,6 +42,7 @@ import ( "github.com/stretchr/testify/require" k8sfake "k8s.io/client-go/kubernetes/fake" + k8smetafake "k8s.io/client-go/metadata/fake" "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-libs/logp" @@ -70,6 +73,7 @@ func TestCreateWatcher(t *testing.T) { resourceWatchers 
:= NewWatchers() client := k8sfake.NewSimpleClientset() + metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) config := &kubernetesConfig{ Namespace: "test-ns", SyncPeriod: time.Minute, @@ -80,7 +84,7 @@ func TestCreateWatcher(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) - created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers, config.Namespace, false) + created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, metadataClient, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -90,7 +94,7 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers, config.Namespace, true) + created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, metadataClient, resourceWatchers, config.Namespace, true) require.False(t, created) require.NoError(t, err) @@ -100,7 +104,7 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) + created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, metadataClient, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -115,6 +119,7 @@ func TestAddToMetricsetsUsing(t *testing.T) { resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() + metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) config := &kubernetesConfig{ Namespace: "test-ns", SyncPeriod: time.Minute, 
@@ -126,7 +131,7 @@ func TestAddToMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, metadataClient, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -152,6 +157,7 @@ func TestRemoveFromMetricsetsUsing(t *testing.T) { resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() + metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) config := &kubernetesConfig{ Namespace: "test-ns", SyncPeriod: time.Minute, @@ -163,7 +169,7 @@ func TestRemoveFromMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, metadataClient, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -192,6 +198,7 @@ func TestCreateAllWatchers(t *testing.T) { resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() + metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) config := &kubernetesConfig{ Namespace: "test-ns", SyncPeriod: time.Minute, @@ -204,7 +211,7 @@ func TestCreateAllWatchers(t *testing.T) { log := logp.NewLogger("test") // Start watchers based on a resource that does not exist should cause an error - err := createAllWatchers(client, "does-not-exist", "does-not-exist", false, config, log, resourceWatchers) + err := createAllWatchers(client, metadataClient, "does-not-exist", "does-not-exist", false, config, log, 
resourceWatchers) require.Error(t, err) resourceWatchers.lock.Lock() require.Equal(t, 0, len(resourceWatchers.metaWatchersMap)) @@ -213,7 +220,7 @@ func TestCreateAllWatchers(t *testing.T) { // Start watcher for a resource that requires other resources, should start all the watchers metricsetPod := "pod" extras := getExtraWatchers(PodResource, config.AddResourceMetadata) - err = createAllWatchers(client, metricsetPod, PodResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, metadataClient, metricsetPod, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) // Check that all the required watchers are in the map @@ -244,6 +251,7 @@ func TestCreateMetaGen(t *testing.T) { }, } client := k8sfake.NewSimpleClientset() + metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) _, err = createMetadataGen(client, commonConfig, config.AddResourceMetadata, DeploymentResource, resourceWatchers) // At this point, no watchers were created @@ -251,7 +259,7 @@ func TestCreateMetaGen(t *testing.T) { // Create the watchers necessary for the metadata generator metricsetDeployment := "state_deployment" - err = createAllWatchers(client, metricsetDeployment, DeploymentResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, metadataClient, metricsetDeployment, DeploymentResource, false, config, log, resourceWatchers) require.NoError(t, err) // Create the generators, this time without error @@ -284,6 +292,7 @@ func TestCreateMetaGenSpecific(t *testing.T) { }, } client := k8sfake.NewSimpleClientset() + metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) // For pod: metricsetPod := "pod" @@ -293,7 +302,7 @@ func TestCreateMetaGenSpecific(t *testing.T) { require.Error(t, err) // Create the pod resource + the extras - err = createAllWatchers(client, metricsetPod, PodResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, metadataClient, 
metricsetPod, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) _, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, PodResource, resourceWatchers) @@ -306,7 +315,7 @@ func TestCreateMetaGenSpecific(t *testing.T) { // Create the service resource + the extras metricsetService := "state_service" - err = createAllWatchers(client, metricsetService, ServiceResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, metadataClient, metricsetService, ServiceResource, false, config, log, resourceWatchers) require.NoError(t, err) _, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, ServiceResource, resourceWatchers) @@ -592,6 +601,144 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers.lock.Unlock() } +func TestBuildMetadataEnricher_PartialMetadata(t *testing.T) { + resourceWatchers := NewWatchers() + + resourceWatchers.lock.Lock() + watcher := &metaWatcher{ + watcher: &mockWatcher{ + store: cache.NewStore(cache.MetaNamespaceKeyFunc), + }, + started: false, + metricsetsUsing: []string{"replicaset"}, + enrichers: make(map[string]*enricher), + } + resourceWatchers.metaWatchersMap[ReplicaSetResource] = watcher + addEventHandlerToWatcher(watcher, resourceWatchers) + resourceWatchers.lock.Unlock() + + isController := true + resource := &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID("mockuid"), + Name: "enrich", + Labels: map[string]string{ + "label": "value", + }, + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "enrich_deployment", + Controller: &isController, + }, + }, + }, + } + + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + AddResourceMetadata: &metadata.AddResourceMetadataConfig{ + CronJob: false, + Deployment: true, + }, + } + + metricset := "replicaset" + log := 
logp.NewLogger(selector) + + commonMetaConfig := metadata.Config{} + commonConfig, _ := conf.NewConfigFrom(&commonMetaConfig) + client := k8sfake.NewSimpleClientset() + generalMetaGen := metadata.NewResourceMetadataGenerator(commonConfig, client) + + updateFunc := getEventMetadataFunc(log, generalMetaGen, nil, nil) + + deleteFunc := func(r kubernetes.Resource) []string { + accessor, _ := meta.Accessor(r) + id := accessor.GetName() + namespace := accessor.GetNamespace() + if namespace != "" { + id = join(namespace, id) + } + return []string{id} + } + + indexFunc := func(e mapstr.M) string { + name := getString(e, "name") + namespace := getString(e, mb.ModuleDataKey+".namespace") + var id string + if name != "" && namespace != "" { + id = join(namespace, name) + } else if namespace != "" { + id = namespace + } else { + id = name + } + return id + } + + enricher := buildMetadataEnricher(metricset, ReplicaSetResource, resourceWatchers, config, + updateFunc, deleteFunc, indexFunc, log) + + enricher.Start(resourceWatchers) + resourceWatchers.lock.Lock() + require.True(t, watcher.started) + resourceWatchers.lock.Unlock() + + // manually run the transform function here, just like the actual informer + transformed, err := transformReplicaSetMetadata(resource) + require.NoError(t, err) + watcher.watcher.GetEventHandler().OnAdd(transformed) + err = watcher.watcher.Store().Add(transformed) + require.NoError(t, err) + + // Test enricher + events := []mapstr.M{ + // {"name": "unknown"}, + {"name": resource.Name, mb.ModuleDataKey + ".namespace": resource.Namespace}, + } + enricher.Enrich(events) + + require.Equal(t, []mapstr.M{ + // {"name": "unknown"}, + { + "name": "enrich", + "_module": mapstr.M{ + "labels": mapstr.M{"label": "value"}, + "replicaset": mapstr.M{"name": "enrich", "uid": "mockuid"}, + "namespace": resource.Namespace, + "deployment": mapstr.M{ + "name": "enrich_deployment", + }, + }, + mb.ModuleDataKey + ".namespace": resource.Namespace, + "meta": mapstr.M{}, + }, 
+ }, events) + + watcher.watcher.GetEventHandler().OnDelete(resource) + err = watcher.watcher.Store().Delete(resource) + require.NoError(t, err) + + events = []mapstr.M{ + {"name": "enrich"}, + } + enricher.Enrich(events) + + require.Equal(t, []mapstr.M{ + {"name": "enrich"}, + }, events) + + enricher.Stop(resourceWatchers) + resourceWatchers.lock.Lock() + require.False(t, watcher.started) + resourceWatchers.lock.Unlock() +} + func TestGetWatcherStoreKeyFromMetadataKey(t *testing.T) { t.Run("global resource", func(t *testing.T) { assert.Equal(t, "name", getWatcherStoreKeyFromMetadataKey("name")) From 7be47da326413fc9ae6e96a0c4b467ba481b6210 Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Fri, 18 Oct 2024 10:16:58 -0700 Subject: [PATCH 56/90] [auditbeat] Update docs to promote add_session_metadata processor to GA (#41295) The add_session_metadata processor will be promoted to GA in 8.16, this updates the documentation to reflect that. This also has some other documentation improvements; more godoc comments on functions, and rename SyncDB to Sync to reflect it doesn't sync a DB in all providers. 
--- .../sessionmd/add_session_metadata.go | 4 +- .../docs/add_session_metadata.asciidoc | 10 +-- .../kerneltracingprovider_linux.go | 88 +++++++++++++------ .../kerneltracingprovider_other.go | 2 +- .../provider/procfsprovider/procfsprovider.go | 8 +- .../procfsprovider/procfsprovider_test.go | 10 +-- .../processors/sessionmd/provider/provider.go | 3 +- 7 files changed, 79 insertions(+), 46 deletions(-) diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go index 28ef4697b79a..a4646b6b6685 100644 --- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go +++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go @@ -13,7 +13,6 @@ import ( "strconv" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/processdb" "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/procfs" @@ -47,7 +46,6 @@ type addSessionMetadata struct { } func New(cfg *cfg.C) (beat.Processor, error) { - cfgwarn.Beta("add_session_metadata processor is a beta feature.") c := defaultConfig() if err := cfg.Unpack(&c); err != nil { return nil, fmt.Errorf("fail to unpack the %v configuration: %w", processorName, err) @@ -129,7 +127,7 @@ func (p *addSessionMetadata) Run(ev *beat.Event) (*beat.Event, error) { return ev, nil //nolint:nilerr // Running on events with a different PID type is not a processor error } - err = p.provider.SyncDB(ev, pid) + err = p.provider.Sync(ev, pid) if err != nil { return ev, err } diff --git a/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc b/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc index aaddde322c14..8c9314d054ff 100644 --- a/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc +++ 
b/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc @@ -1,11 +1,9 @@ [[add-session-metadata]] -=== Add session metadata (Beta) +=== Add session metadata ++++ add_session_metadata ++++ -beta::[] - The `add_session_metadata` processor enriches process events with additional information that users can see using the {security-guide}/session-view.html[Session View] tool in the {elastic-sec} platform. @@ -41,9 +39,9 @@ The `add_session_metadata` processor operates using various backend options. * `auto` is the recommended setting. It attempts to use `kernel_tracing` first, falling back to `procfs` if necessary, ensuring compatibility even on systems without `kernel_tracing` support. -* `kernel_tracing` collects process information with eBPF or kprobes. - This backend will prefer to use eBPF, if eBPF is not supported kprobes will be used. eBPF requires a system with Linux kernel 5.10.16 or above, kernel support for eBPF enabled, and auditbeat running as superuser. - Kprobe support required Linux kernel 3.10.0 or above, and auditbeat running as a superuser. +* `kernel_tracing` gathers information about processes using either eBPF or kprobes. + It will use eBPF if available, but if not, it will fall back to kprobes. eBPF requires a system with kernel support for eBPF enabled, support for eBPF ring buffer, and auditbeat running as superuser. + Kprobe support requires Linux kernel 3.10.0 or above, and auditbeat running as a superuser. * `procfs` collects process information with the proc filesystem. This is compatible with older systems that may not support ebpf. To gather complete process info, auditbeat requires permissions to read all process data in procfs; for example, run as a superuser or have the `SYS_PTRACE` capability. 
diff --git a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go index 966f4b36c30c..506d840b5efa 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go @@ -71,15 +71,17 @@ var ( pidNsInode uint64 ) +// readBootID returns the boot ID of the Linux system from "/proc/sys/kernel/random/boot_id" func readBootID() (string, error) { bootID, err := os.ReadFile("/proc/sys/kernel/random/boot_id") if err != nil { - return "", fmt.Errorf("could not read /proc/sys/kernel/random/boot_id, process entity IDs will not be correct: %w", err) + return "", fmt.Errorf("could not read /proc/sys/kernel/random/boot_id: %w", err) } return strings.TrimRight(string(bootID), "\n"), nil } +// readPIDNsInode returns the PID namespace inode that auditbeat is running in from "/proc/self/ns/pid" func readPIDNsInode() (uint64, error) { var ret uint64 @@ -95,6 +97,7 @@ func readPIDNsInode() (uint64, error) { return ret, nil } +// NewProvider returns a new instance of kerneltracingprovider func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, error) { attr := quark.DefaultQueueAttr() attr.Flags = quark.QQ_ALL_BACKENDS | quark.QQ_ENTRY_LEADER | quark.QQ_NO_SNAPSHOT @@ -154,42 +157,31 @@ const ( resetDuration = 5 * time.Second // After this amount of times with no backoffs, the combinedWait will be reset ) -func (p *prvdr) SyncDB(_ *beat.Event, pid uint32) error { +// Sync ensures that the specified pid is present in the internal cache, to ensure the processor is capable of enriching the process. +// The function waits up to a maximum limit (maxWaitLimit) for the pid to appear in the cache using an exponential delay strategy. 
+// If the pid is not found within the time limit, then an error is returned. +// +// The function also maintains a moving window of time for tracking delays, and applies a backoff strategy if the combined wait time +// exceeds a certain limit (combinedWaitLimit). This is done so that in the case where there are multiple delays, the cumulative delay +// does not exceed a reasonable threshold that would delay all other events processed by auditbeat. When in the backoff state, enrichment +// will proceed without waiting for the process data to exist in the cache, likely resulting in missing enrichment data. +func (p *prvdr) Sync(_ *beat.Event, pid uint32) error { p.qqMtx.Lock() defer p.qqMtx.Unlock() - // Use qq.Lookup, not lookupLocked, in this function. Mutex is locked for entire function - + // If pid is already in qq, return immediately if _, found := p.qq.Lookup(int(pid)); found { return nil } - now := time.Now() + start := time.Now() + + p.handleBackoff(start) if p.inBackoff { - if now.Sub(p.backoffStart) > backoffDuration { - p.logger.Warnw("ended backoff, skipped processes", "backoffSkipped", p.backoffSkipped) - p.inBackoff = false - p.combinedWait = 0 * time.Millisecond - } else { - p.backoffSkipped += 1 - return nil - } - } else { - if p.combinedWait > combinedWaitLimit { - p.logger.Warn("starting backoff") - p.inBackoff = true - p.backoffStart = now - p.backoffSkipped = 0 - return nil - } - // maintain a moving window of time for the delays we track - if now.Sub(p.since) > resetDuration { - p.since = now - p.combinedWait = 0 * time.Millisecond - } + return nil } - start := now + // Wait until either the process exists within the cache or the maxWaitLimit is exceeded, with an exponential delay nextWait := 5 * time.Millisecond for { waited := time.Since(start) @@ -211,6 +203,38 @@ func (p *prvdr) SyncDB(_ *beat.Event, pid uint32) error { } } +// handleBackoff handles backoff logic of `Sync` +// If the combinedWait time exceeds the combinedWaitLimit duration, 
the provider will go into backoff state until the backoffDuration is exceeded. +// If in a backoff period, it will track the number of skipped processes, and then log the number when exiting backoff. +// +// If there have been no backoffs within the resetDuration, the combinedWait duration is reset to zero, to keep a moving window in which delays are tracked. +func (p *prvdr) handleBackoff(now time.Time) { + if p.inBackoff { + if now.Sub(p.backoffStart) > backoffDuration { + p.logger.Warnw("ended backoff, skipped processes", "backoffSkipped", p.backoffSkipped) + p.inBackoff = false + p.combinedWait = 0 * time.Millisecond + } else { + p.backoffSkipped += 1 + return + } + } else { + if p.combinedWait > combinedWaitLimit { + p.logger.Warn("starting backoff") + p.inBackoff = true + p.backoffStart = now + p.backoffSkipped = 0 + return + } + if now.Sub(p.since) > resetDuration { + p.since = now + p.combinedWait = 0 * time.Millisecond + } + } +} + +// GetProcess returns a reference to Process struct that contains all known information for the +// process, and its ancestors (parent, process group leader, session leader, and entry leader). 
func (p *prvdr) GetProcess(pid uint32) (*types.Process, error) { proc, found := p.lookupLocked(pid) if !found { @@ -271,6 +295,7 @@ func (p prvdr) lookupLocked(pid uint32) (quark.Process, bool) { return p.qq.Lookup(int(pid)) } +// fillParent populates the parent process fields with the attributes of the process with PID `ppid` func (p prvdr) fillParent(process *types.Process, ppid uint32) { proc, found := p.lookupLocked(ppid) if !found { @@ -304,6 +329,7 @@ func (p prvdr) fillParent(process *types.Process, ppid uint32) { process.Parent.EntityID = calculateEntityIDv1(ppid, *process.Start) } +// fillGroupLeader populates the process group leader fields with the attributes of the process with PID `pgid` func (p prvdr) fillGroupLeader(process *types.Process, pgid uint32) { proc, found := p.lookupLocked(pgid) if !found { @@ -338,6 +364,7 @@ func (p prvdr) fillGroupLeader(process *types.Process, pgid uint32) { process.GroupLeader.EntityID = calculateEntityIDv1(pgid, *process.GroupLeader.Start) } +// fillSessionLeader populates the session leader fields with the attributes of the process with PID `sid` func (p prvdr) fillSessionLeader(process *types.Process, sid uint32) { proc, found := p.lookupLocked(sid) if !found { @@ -372,6 +399,7 @@ func (p prvdr) fillSessionLeader(process *types.Process, sid uint32) { process.SessionLeader.EntityID = calculateEntityIDv1(sid, *process.SessionLeader.Start) } +// fillEntryLeader populates the entry leader fields with the attributes of the process with PID `elid` func (p prvdr) fillEntryLeader(process *types.Process, elid uint32) { proc, found := p.lookupLocked(elid) if !found { @@ -406,6 +434,7 @@ func (p prvdr) fillEntryLeader(process *types.Process, elid uint32) { process.EntryLeader.EntryMeta.Type = getEntryTypeName(proc.Proc.EntryLeaderType) } +// setEntityID sets entityID for the process and its parent, group leader, session leader, entry leader if possible func setEntityID(process *types.Process) { if process.PID != 0 && 
process.Start != nil { process.EntityID = calculateEntityIDv1(process.PID, *process.Start) @@ -428,6 +457,7 @@ func setEntityID(process *types.Process) { } } +// setSameAsProcess sets if the process is the same as its group leader, session leader, entry leader func setSameAsProcess(process *types.Process) { if process.GroupLeader.PID != 0 && process.GroupLeader.Start != nil { sameAsProcess := process.PID == process.GroupLeader.PID @@ -445,10 +475,12 @@ func setSameAsProcess(process *types.Process) { } } +// interactiveFromTTY returns if this is an interactive tty device. func interactiveFromTTY(tty types.TTYDev) bool { return TTYUnknown != getTTYType(tty.Major, tty.Minor) } +// getTTYType returns the type of a TTY device based on its major and minor numbers. func getTTYType(major uint32, minor uint32) TTYType { if major >= ptsMinMajor && major <= ptsMaxMajor { return Pts @@ -465,6 +497,8 @@ func getTTYType(major uint32, minor uint32) TTYType { return TTYUnknown } +// calculateEntityIDv1 calculates the entity ID for a process. +// This is a globally unique identifier for the process. 
func calculateEntityIDv1(pid uint32, startTime time.Time) string { return base64.StdEncoding.EncodeToString( []byte( diff --git a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_other.go b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_other.go index e895a696747d..54f46f94209b 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_other.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_other.go @@ -22,7 +22,7 @@ func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, e return prvdr{}, fmt.Errorf("build type not supported, cgo required") } -func (p prvdr) SyncDB(event *beat.Event, pid uint32) error { +func (p prvdr) Sync(event *beat.Event, pid uint32) error { return fmt.Errorf("build type not supported") } diff --git a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go index 4934a79fc52c..e29e70a0549b 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go @@ -30,6 +30,7 @@ type prvdr struct { pidField string } +// NewProvider returns a new instance of procfsprovider. func NewProvider(ctx context.Context, logger *logp.Logger, db *processdb.DB, reader procfs.Reader, pidField string) (provider.Provider, error) { return prvdr{ ctx: ctx, @@ -40,12 +41,15 @@ func NewProvider(ctx context.Context, logger *logp.Logger, db *processdb.DB, rea }, nil } +// GetProcess is not implemented in this provider. 
+// This provider adds to the processdb, and process information is retrieved from the DB, not directly from the provider func (p prvdr) GetProcess(pid uint32) (*types.Process, error) { return nil, fmt.Errorf("not implemented") } -// SyncDB will update the process DB with process info from procfs or the event itself -func (p prvdr) SyncDB(ev *beat.Event, pid uint32) error { +// Sync updates the process information database using on the syscall event data and by scraping procfs. +// As process information will not be available in procfs after a process has exited, the provider is susceptible to missing information in short-lived events. +func (p prvdr) Sync(ev *beat.Event, pid uint32) error { syscall, err := ev.GetValue(syscallField) if err != nil { return fmt.Errorf("event not supported, no syscall data") diff --git a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go index 42f19f488ce4..3d4941882f3b 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go @@ -124,7 +124,7 @@ func TestExecveEvent(t *testing.T) { provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") - err = provider.SyncDB(&event, expected.PIDs.Tgid) + err = provider.Sync(&event, expected.PIDs.Tgid) require.Nil(t, err) actual, err := db.GetProcess(pid) @@ -234,7 +234,7 @@ func TestExecveatEvent(t *testing.T) { provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") - err = provider.SyncDB(&event, expected.PIDs.Tgid) + err = provider.Sync(&event, expected.PIDs.Tgid) require.Nil(t, err) actual, err := db.GetProcess(pid) @@ -317,7 +317,7 @@ func TestSetSidEvent(t *testing.T) { provider, err := 
NewProvider(context.TODO(), &logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") - err = provider.SyncDB(&event, expected.PIDs.Tgid) + err = provider.Sync(&event, expected.PIDs.Tgid) require.Nil(t, err) actual, err := db.GetProcess(pid) @@ -399,7 +399,7 @@ func TestSetSidEventFailed(t *testing.T) { provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") - err = provider.SyncDB(&event, expected.PIDs.Tgid) + err = provider.Sync(&event, expected.PIDs.Tgid) require.Nil(t, err) actual, err := db.GetProcess(pid) @@ -470,7 +470,7 @@ func TestSetSidSessionLeaderNotScraped(t *testing.T) { provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") - err = provider.SyncDB(&event, expected.PIDs.Tgid) + err = provider.Sync(&event, expected.PIDs.Tgid) require.Nil(t, err) actual, err := db.GetProcess(pid) diff --git a/x-pack/auditbeat/processors/sessionmd/provider/provider.go b/x-pack/auditbeat/processors/sessionmd/provider/provider.go index 4ac9530cfeaa..8f464de93abf 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/provider.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/provider.go @@ -11,8 +11,7 @@ import ( "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" ) -// SyncDB should ensure the DB is in a state to handle the event before returning. type Provider interface { - SyncDB(event *beat.Event, pid uint32) error + Sync(event *beat.Event, pid uint32) error GetProcess(pid uint32) (*types.Process, error) } From 5de22873957d4bb80bf3ed38aefd2f23115e3efe Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Mon, 21 Oct 2024 03:52:56 -0300 Subject: [PATCH 57/90] fix go vet errors with Go 1.24 (#41076) * fix go vet errors with Go 1.24 The cmd/vet in Go 1.24 reports printf calls with non-const format and no args, causing failures. 
``` $ go install golang.org/dl/gotip@latest $ gotip download $ gotip vet ./... ``` * use os.WriteFile * more linter fixes * even more linter fixes * more more more linter fixes * fix wrong variable name * fix linter issues with emptyIface --- dev-tools/cmd/module_fields/module_fields.go | 5 +- .../module_include_list.go | 5 +- dev-tools/mage/common.go | 64 ++++++++++++------- heartbeat/hbtestllext/isdefs.go | 2 +- heartbeat/look/look_test.go | 3 +- heartbeat/monitors/active/icmp/stdloop.go | 2 +- .../summarizertesthelper/testhelper.go | 6 +- libbeat/cmd/instance/beat.go | 6 +- libbeat/common/cli/confirm.go | 6 +- libbeat/common/cli/input.go | 2 +- .../common/schema/mapstriface/mapstriface.go | 60 ++++++++--------- libbeat/dashboards/get.go | 2 +- libbeat/dashboards/kibana_loader.go | 14 ++-- .../processors/actions/decode_json_fields.go | 2 +- libbeat/processors/actions/include_fields.go | 2 +- metricbeat/helper/kubernetes/ktest/ktest.go | 2 +- .../module/elasticsearch/elasticsearch.go | 6 +- metricbeat/module/prometheus/query/data.go | 18 ++---- packetbeat/beater/processor.go | 2 +- packetbeat/config/agent.go | 4 +- packetbeat/procs/procs_linux.go | 4 +- packetbeat/protos/http/http_test.go | 4 +- 22 files changed, 114 insertions(+), 107 deletions(-) diff --git a/dev-tools/cmd/module_fields/module_fields.go b/dev-tools/cmd/module_fields/module_fields.go index 203cc2980287..3ba2d97f12ab 100644 --- a/dev-tools/cmd/module_fields/module_fields.go +++ b/dev-tools/cmd/module_fields/module_fields.go @@ -20,7 +20,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "log" "os" "path/filepath" @@ -104,7 +103,7 @@ func main() { log.Fatalf("Error creating golang file from template: %v", err) } - err = ioutil.WriteFile(filepath.Join(dir, module, "fields.go"), bs, 0644) + err = os.WriteFile(filepath.Join(dir, module, "fields.go"), bs, 0644) if err != nil { log.Fatalf("Error writing fields.go: %v", err) } @@ -112,6 +111,6 @@ func main() { } func usageFlag() { - 
fmt.Fprintf(os.Stderr, usageText) + fmt.Fprint(os.Stderr, usageText) flag.PrintDefaults() } diff --git a/dev-tools/cmd/module_include_list/module_include_list.go b/dev-tools/cmd/module_include_list/module_include_list.go index 4d222d2e707e..8bc58537b81e 100644 --- a/dev-tools/cmd/module_include_list/module_include_list.go +++ b/dev-tools/cmd/module_include_list/module_include_list.go @@ -22,7 +22,6 @@ import ( "bytes" "flag" "fmt" - "io/ioutil" "log" "os" "path/filepath" @@ -164,13 +163,13 @@ func main() { } // Write the output file. - if err = ioutil.WriteFile(outFile, buf.Bytes(), 0644); err != nil { + if err = os.WriteFile(outFile, buf.Bytes(), 0644); err != nil { log.Fatalf("Failed writing output file: %v", err) } } func usageFlag() { - fmt.Fprintf(os.Stderr, usageText) + fmt.Fprint(os.Stderr, usageText) flag.PrintDefaults() } diff --git a/dev-tools/mage/common.go b/dev-tools/mage/common.go index 1c1ca25d95bc..01c683c11e7b 100644 --- a/dev-tools/mage/common.go +++ b/dev-tools/mage/common.go @@ -32,7 +32,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "net/http" "os" @@ -125,7 +124,7 @@ func joinMaps(args ...map[string]interface{}) map[string]interface{} { } func expandFile(src, dst string, args ...map[string]interface{}) error { - tmplData, err := ioutil.ReadFile(src) + tmplData, err := os.ReadFile(src) if err != nil { return fmt.Errorf("failed reading from template %v: %w", src, err) } @@ -140,7 +139,7 @@ func expandFile(src, dst string, args ...map[string]interface{}) error { return err } - if err = ioutil.WriteFile(createDir(dst), []byte(output), 0644); err != nil { + if err = os.WriteFile(createDir(dst), []byte(output), 0644); err != nil { return fmt.Errorf("failed to write rendered template: %w", err) } @@ -262,13 +261,13 @@ func FindReplace(file string, re *regexp.Regexp, repl string) error { return err } - contents, err := ioutil.ReadFile(file) + contents, err := os.ReadFile(file) if err != nil { return err } out := 
re.ReplaceAllString(string(contents), repl) - return ioutil.WriteFile(file, []byte(out), info.Mode().Perm()) + return os.WriteFile(file, []byte(out), info.Mode().Perm()) } // MustFindReplace invokes FindReplace and panics if an error occurs. @@ -283,9 +282,14 @@ func MustFindReplace(file string, re *regexp.Regexp, repl string) { func DownloadFile(url, destinationDir string) (string, error) { log.Println("Downloading", url) - resp, err := http.Get(url) + req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil) if err != nil { - return "", fmt.Errorf("http get failed: %w", err) + return "", fmt.Errorf("failed to create http request: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return "", fmt.Errorf("failed to download file: %w", err) } defer resp.Body.Close() @@ -338,9 +342,9 @@ func unzip(sourceFile, destinationDir string) error { } defer innerFile.Close() - path := filepath.Join(destinationDir, f.Name) - if !strings.HasPrefix(path, destinationDir) { - return fmt.Errorf("illegal file path in zip: %v", f.Name) + path, err := sanitizeFilePath(f.Name, destinationDir) + if err != nil { + return err } if f.FileInfo().IsDir() { @@ -357,7 +361,7 @@ func unzip(sourceFile, destinationDir string) error { } defer out.Close() - if _, err = io.Copy(out, innerFile); err != nil { + if _, err = io.Copy(out, innerFile); err != nil { //nolint:gosec // this is only used for dev tools return err } @@ -374,6 +378,16 @@ func unzip(sourceFile, destinationDir string) error { return nil } +// sanitizeExtractPath sanitizes against path traversal attacks. +// See https://security.snyk.io/research/zip-slip-vulnerability. 
+func sanitizeFilePath(filePath string, workdir string) (string, error) { + destPath := filepath.Join(workdir, filePath) + if !strings.HasPrefix(destPath, filepath.Clean(workdir)+string(os.PathSeparator)) { + return filePath, fmt.Errorf("failed to extract illegal file path: %s", filePath) + } + return destPath, nil +} + // Tar compress a directory using tar + gzip algorithms but without adding // the directory func TarWithOptions(src string, targetFile string, trimSource bool) error { @@ -390,7 +404,7 @@ func TarWithOptions(src string, targetFile string, trimSource bool) error { tw := tar.NewWriter(zr) // walk through every file in the folder - filepath.Walk(src, func(file string, fi os.FileInfo, errFn error) error { + err = filepath.Walk(src, func(file string, fi os.FileInfo, errFn error) error { if errFn != nil { return fmt.Errorf("error traversing the file system: %w", errFn) } @@ -438,6 +452,9 @@ func TarWithOptions(src string, targetFile string, trimSource bool) error { } return nil }) + if err != nil { + return fmt.Errorf("error walking dir: %w", err) + } // produce tar if err := tw.Close(); err != nil { @@ -477,15 +494,15 @@ func untar(sourceFile, destinationDir string) error { for { header, err := tarReader.Next() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } return err } - path := filepath.Join(destinationDir, header.Name) - if !strings.HasPrefix(path, destinationDir) { - return fmt.Errorf("illegal file path in tar: %v", header.Name) + path, err := sanitizeFilePath(header.Name, destinationDir) + if err != nil { + return err } switch header.Typeflag { @@ -499,7 +516,7 @@ func untar(sourceFile, destinationDir string) error { return err } - if _, err = io.Copy(writer, tarReader); err != nil { + if _, err = io.Copy(writer, tarReader); err != nil { //nolint:gosec // this is only used for dev tools return err } @@ -613,7 +630,7 @@ func ParallelCtx(ctx context.Context, fns ...interface{}) { wg.Wait() if len(errs) > 0 { - 
panic(fmt.Errorf(strings.Join(errs, "\n"))) + panic(errors.New(strings.Join(errs, "\n"))) } } @@ -773,7 +790,7 @@ func CreateSHA512File(file string) error { computedHash := hex.EncodeToString(sum.Sum(nil)) out := fmt.Sprintf("%v %v", computedHash, filepath.Base(file)) - return ioutil.WriteFile(file+".sha512", []byte(out), 0644) + return os.WriteFile(file+".sha512", []byte(out), 0644) } // Mage executes mage targets in the specified directory. @@ -800,7 +817,7 @@ func IsUpToDate(dst string, sources ...string) bool { var files []string for _, s := range sources { - filepath.Walk(s, func(path string, info os.FileInfo, err error) error { + err := filepath.Walk(s, func(path string, info os.FileInfo, err error) error { if err != nil { if os.IsNotExist(err) { return nil @@ -814,6 +831,9 @@ func IsUpToDate(dst string, sources ...string) bool { return nil }) + if err != nil { + panic(fmt.Errorf("failed to walk source %v: %w", s, err)) + } } execute, err := target.Path(dst, files...) @@ -896,7 +916,7 @@ func ParseVersion(version string) (major, minor, patch int, err error) { matches := parseVersionRegex.FindStringSubmatch(version) if len(matches) == 0 { err = fmt.Errorf("failed to parse version '%v'", version) - return + return major, minor, patch, err } data := map[string]string{} @@ -906,7 +926,7 @@ func ParseVersion(version string) (major, minor, patch int, err error) { major, _ = strconv.Atoi(data["major"]) minor, _ = strconv.Atoi(data["minor"]) patch, _ = strconv.Atoi(data["patch"]) - return + return major, minor, patch, nil } // ListMatchingEnvVars returns all of the environment variables names that begin diff --git a/heartbeat/hbtestllext/isdefs.go b/heartbeat/hbtestllext/isdefs.go index e20f2cb18a1a..eeb56e166311 100644 --- a/heartbeat/hbtestllext/isdefs.go +++ b/heartbeat/hbtestllext/isdefs.go @@ -74,7 +74,7 @@ var IsMonitorStateInLocation = func(locName string) isdef.IsDef { } if !stateIdMatch.MatchString(s.ID) { - return llresult.SimpleResult(path, false, 
fmt.Sprintf("ID %s does not match regexp pattern /%s/", s.ID, locPattern)) + return llresult.SimpleResult(path, false, "ID %s does not match regexp pattern /%s/", s.ID, locPattern) } return llresult.ValidResult(path) }) diff --git a/heartbeat/look/look_test.go b/heartbeat/look/look_test.go index ff3f9bdcc54b..77f5377b2356 100644 --- a/heartbeat/look/look_test.go +++ b/heartbeat/look/look_test.go @@ -18,6 +18,7 @@ package look import ( + "errors" "testing" "time" @@ -57,7 +58,7 @@ func TestReason(t *testing.T) { func TestReasonGenericError(t *testing.T) { msg := "An error" - res := Reason(fmt.Errorf(msg)) + res := Reason(errors.New(msg)) assert.Equal(t, mapstr.M{ "type": "io", "message": msg, diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go index f67ae402bc70..8fa0816bb5b9 100644 --- a/heartbeat/monitors/active/icmp/stdloop.go +++ b/heartbeat/monitors/active/icmp/stdloop.go @@ -110,7 +110,7 @@ func getStdLoop() (*stdICMPLoop, error) { } func noPingCapabilityError(message string) error { - return fmt.Errorf(fmt.Sprintf("Insufficient privileges to perform ICMP ping. %s", message)) + return fmt.Errorf("Insufficient privileges to perform ICMP ping. %s", message) } func newICMPLoop() (*stdICMPLoop, error) { diff --git a/heartbeat/monitors/wrappers/summarizer/summarizertesthelper/testhelper.go b/heartbeat/monitors/wrappers/summarizer/summarizertesthelper/testhelper.go index bcea2bd803ed..7c8d5de23bc4 100644 --- a/heartbeat/monitors/wrappers/summarizer/summarizertesthelper/testhelper.go +++ b/heartbeat/monitors/wrappers/summarizer/summarizertesthelper/testhelper.go @@ -22,8 +22,6 @@ package summarizertesthelper // prevent import cycles. 
import ( - "fmt" - "github.com/elastic/beats/v7/heartbeat/hbtestllext" "github.com/elastic/beats/v7/heartbeat/monitors/wrappers/summarizer/jobsummary" "github.com/elastic/go-lookslike" @@ -46,11 +44,11 @@ func summaryIsdef(up uint16, down uint16) isdef.IsDef { return isdef.Is("summary", func(path llpath.Path, v interface{}) *llresult.Results { js, ok := v.(jobsummary.JobSummary) if !ok { - return llresult.SimpleResult(path, false, fmt.Sprintf("expected a *jobsummary.JobSummary, got %v", v)) + return llresult.SimpleResult(path, false, "expected a *jobsummary.JobSummary, got %v", v) } if js.Up != up || js.Down != down { - return llresult.SimpleResult(path, false, fmt.Sprintf("expected up/down to be %d/%d, got %d/%d", up, down, js.Up, js.Down)) + return llresult.SimpleResult(path, false, "expected up/down to be %d/%d, got %d/%d", up, down, js.Up, js.Down) } return llresult.ValidResult(path) diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index df3a71416b61..1a6250fad4d5 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -388,7 +388,7 @@ func NewBeatReceiver(settings Settings, receiverConfig map[string]interface{}, c } // log paths values to help with troubleshooting - logp.Info(paths.Paths.String()) + logp.Info("%s", paths.Paths.String()) metaPath := paths.Resolve(paths.Data, "meta.json") err = b.loadMeta(metaPath) @@ -603,7 +603,7 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { logp.Info("Output is configured through Central Management") } else { msg := "no outputs are defined, please define one under the output section" - logp.Info(msg) + logp.Info("%s", msg) return nil, errors.New(msg) } } @@ -1055,7 +1055,7 @@ func (b *Beat) configure(settings Settings) error { } // log paths values to help with troubleshooting - logp.Info(paths.Paths.String()) + logp.Info("%s", paths.Paths.String()) metaPath := paths.Resolve(paths.Data, "meta.json") err = b.loadMeta(metaPath) diff --git 
a/libbeat/common/cli/confirm.go b/libbeat/common/cli/confirm.go index 7028561abe28..439174768fff 100644 --- a/libbeat/common/cli/confirm.go +++ b/libbeat/common/cli/confirm.go @@ -35,14 +35,14 @@ func Confirm(prompt string, def bool) (bool, error) { } func confirm(r io.Reader, out io.Writer, prompt string, def bool) (bool, error) { - options := " [Y/n]" + options := "[Y/n]" if !def { - options = " [y/N]" + options = "[y/N]" } reader := bufio.NewScanner(r) for { - fmt.Fprintf(out, prompt+options+":") + fmt.Fprintf(out, "%s %s:", prompt, options) if !reader.Scan() { break diff --git a/libbeat/common/cli/input.go b/libbeat/common/cli/input.go index 0d5163e4ec15..9584a525c45a 100644 --- a/libbeat/common/cli/input.go +++ b/libbeat/common/cli/input.go @@ -34,7 +34,7 @@ func ReadInput(prompt string) (string, error) { func input(r io.Reader, out io.Writer, prompt string) (string, error) { reader := bufio.NewScanner(r) - fmt.Fprintf(out, prompt+" ") + fmt.Fprintf(out, "%s ", prompt) if !reader.Scan() { return "", errors.New("error reading user input") diff --git a/libbeat/common/schema/mapstriface/mapstriface.go b/libbeat/common/schema/mapstriface/mapstriface.go index 209ac0e03ce1..d06119a5bf0a 100644 --- a/libbeat/common/schema/mapstriface/mapstriface.go +++ b/libbeat/common/schema/mapstriface/mapstriface.go @@ -72,6 +72,7 @@ package mapstriface import ( "encoding/json" + "errors" "fmt" "time" @@ -102,18 +103,19 @@ func (convMap ConvMap) Map(key string, event mapstr.M, data map[string]interface switch subData := d.(type) { case map[string]interface{}, mapstr.M: subEvent := mapstr.M{} - _, errors := convMap.Schema.ApplyTo(subEvent, subData.(map[string]interface{})) - for _, err := range errors { - if err, ok := err.(schema.KeyError); ok { - err.SetKey(convMap.Key + "." 
+ err.Key()) + _, errs := convMap.Schema.ApplyTo(subEvent, subData.(map[string]interface{})) + for _, err := range errs { + var keyErr schema.KeyError + if errors.As(err, &keyErr) { + keyErr.SetKey(convMap.Key + "." + keyErr.Key()) } } event[key] = subEvent - return errors + return errs default: msg := fmt.Sprintf("expected dictionary, found %T", subData) err := schema.NewWrongFormatError(convMap.Key, msg) - logp.Err(err.Error()) + logp.Err("%s", err.Error()) return multierror.Errors{err} } } @@ -135,11 +137,11 @@ func toStrFromNum(key string, data map[string]interface{}) (interface{}, error) if err != nil { return "", schema.NewKeyNotFoundError(key) } - switch emptyIface.(type) { + switch val := emptyIface.(type) { case int, int32, int64, uint, uint32, uint64, float32, float64: return fmt.Sprintf("%v", emptyIface), nil case json.Number: - return string(emptyIface.(json.Number)), nil + return string(val), nil default: msg := fmt.Sprintf("expected number, found %T", emptyIface) return "", schema.NewWrongFormatError(key, msg) @@ -207,24 +209,23 @@ func toInteger(key string, data map[string]interface{}) (interface{}, error) { if err != nil { return 0, schema.NewKeyNotFoundError(key) } - switch emptyIface.(type) { + switch val := emptyIface.(type) { case int64: - return emptyIface.(int64), nil + return val, nil case int: - return int64(emptyIface.(int)), nil + return int64(val), nil case float64: - return int64(emptyIface.(float64)), nil + return int64(val), nil case json.Number: - num := emptyIface.(json.Number) - i64, err := num.Int64() + i64, err := val.Int64() if err == nil { return i64, nil } - f64, err := num.Float64() + f64, err := val.Float64() if err == nil { return int64(f64), nil } - msg := fmt.Sprintf("expected integer, found json.Number (%v) that cannot be converted", num) + msg := fmt.Sprintf("expected integer, found json.Number (%v) that cannot be converted", val) return 0, schema.NewWrongFormatError(key, msg) default: msg := fmt.Sprintf("expected 
integer, found %T", emptyIface) @@ -243,24 +244,23 @@ func toFloat(key string, data map[string]interface{}) (interface{}, error) { if err != nil { return 0.0, schema.NewKeyNotFoundError(key) } - switch emptyIface.(type) { + switch val := emptyIface.(type) { case float64: - return emptyIface.(float64), nil + return val, nil case int: - return float64(emptyIface.(int)), nil + return float64(val), nil case int64: - return float64(emptyIface.(int64)), nil + return float64(val), nil case json.Number: - num := emptyIface.(json.Number) - i64, err := num.Float64() + i64, err := val.Float64() if err == nil { return i64, nil } - f64, err := num.Float64() + f64, err := val.Float64() if err == nil { return f64, nil } - msg := fmt.Sprintf("expected float, found json.Number (%v) that cannot be converted", num) + msg := fmt.Sprintf("expected float, found json.Number (%v) that cannot be converted", val) return 0.0, schema.NewWrongFormatError(key, msg) default: msg := fmt.Sprintf("expected float, found %T", emptyIface) @@ -280,17 +280,11 @@ func toTime(key string, data map[string]interface{}) (interface{}, error) { return common.Time(time.Unix(0, 0)), schema.NewKeyNotFoundError(key) } - switch emptyIface.(type) { + switch val := emptyIface.(type) { case time.Time: - ts, ok := emptyIface.(time.Time) - if ok { - return common.Time(ts), nil - } + return common.Time(val), nil case common.Time: - ts, ok := emptyIface.(common.Time) - if ok { - return ts, nil - } + return val, nil } msg := fmt.Sprintf("expected date, found %T", emptyIface) diff --git a/libbeat/dashboards/get.go b/libbeat/dashboards/get.go index 2da82ef44447..7b983c290905 100644 --- a/libbeat/dashboards/get.go +++ b/libbeat/dashboards/get.go @@ -35,7 +35,7 @@ var ( // GetDashboard returns the dashboard with the given id with the index pattern removed func Get(client *kibana.Client, id string) ([]byte, error) { if client.Version.LessThan(MinimumRequiredVersionSavedObjects) { - return nil, fmt.Errorf("Kibana version must be 
at least " + MinimumRequiredVersionSavedObjects.String()) + return nil, fmt.Errorf("Kibana version must be at least %s", MinimumRequiredVersionSavedObjects.String()) } // add a special header for serverless, where saved_objects is "hidden" diff --git a/libbeat/dashboards/kibana_loader.go b/libbeat/dashboards/kibana_loader.go index 55d195c4f8e6..3320f996a2a5 100644 --- a/libbeat/dashboards/kibana_loader.go +++ b/libbeat/dashboards/kibana_loader.go @@ -21,8 +21,8 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net/url" + "os" "path/filepath" "time" @@ -104,13 +104,13 @@ func getKibanaClient(ctx context.Context, cfg *config.C, retryCfg *Retry, retryA // ImportIndexFile imports an index pattern from a file func (loader KibanaLoader) ImportIndexFile(file string) error { if loader.version.LessThan(minimumRequiredVersionSavedObjects) { - return fmt.Errorf("Kibana version must be at least " + minimumRequiredVersionSavedObjects.String()) + return fmt.Errorf("Kibana version must be at least %s", minimumRequiredVersionSavedObjects.String()) } loader.statusMsg("Importing index file from %s", file) // read json file - reader, err := ioutil.ReadFile(file) + reader, err := os.ReadFile(file) if err != nil { return fmt.Errorf("fail to read index-pattern from file %s: %w", file, err) } @@ -127,7 +127,7 @@ func (loader KibanaLoader) ImportIndexFile(file string) error { // ImportIndex imports the passed index pattern to Kibana func (loader KibanaLoader) ImportIndex(pattern mapstr.M) error { if loader.version.LessThan(minimumRequiredVersionSavedObjects) { - return fmt.Errorf("kibana version must be at least " + minimumRequiredVersionSavedObjects.String()) + return fmt.Errorf("kibana version must be at least %s", minimumRequiredVersionSavedObjects.String()) } var errs multierror.Errors @@ -149,7 +149,7 @@ func (loader KibanaLoader) ImportIndex(pattern mapstr.M) error { // ImportDashboard imports the dashboard file func (loader KibanaLoader) ImportDashboard(file string) error 
{ if loader.version.LessThan(minimumRequiredVersionSavedObjects) { - return fmt.Errorf("Kibana version must be at least " + minimumRequiredVersionSavedObjects.String()) + return fmt.Errorf("Kibana version must be at least %s", minimumRequiredVersionSavedObjects.String()) } loader.statusMsg("Importing dashboard from %s", file) @@ -158,7 +158,7 @@ func (loader KibanaLoader) ImportDashboard(file string) error { params.Set("overwrite", "true") // read json file - content, err := ioutil.ReadFile(file) + content, err := os.ReadFile(file) if err != nil { return fmt.Errorf("fail to read dashboard from file %s: %w", file, err) } @@ -203,7 +203,7 @@ func (loader KibanaLoader) addReferences(path string, dashboard []byte) (string, if _, ok := loader.loadedAssets[referencePath]; ok { continue } - refContents, err := ioutil.ReadFile(referencePath) + refContents, err := os.ReadFile(referencePath) if err != nil { return "", fmt.Errorf("fail to read referenced asset from file %s: %w", referencePath, err) } diff --git a/libbeat/processors/actions/decode_json_fields.go b/libbeat/processors/actions/decode_json_fields.go index b47ebd646d9c..ad8ffb6ade75 100644 --- a/libbeat/processors/actions/decode_json_fields.go +++ b/libbeat/processors/actions/decode_json_fields.go @@ -177,7 +177,7 @@ func (f *decodeJSONFields) Run(event *beat.Event) (*beat.Event, error) { } if len(errs) > 0 { - return event, fmt.Errorf(strings.Join(errs, ", ")) + return event, errors.New(strings.Join(errs, ", ")) } return event, nil } diff --git a/libbeat/processors/actions/include_fields.go b/libbeat/processors/actions/include_fields.go index 08419e7c2eca..718da6c456cd 100644 --- a/libbeat/processors/actions/include_fields.go +++ b/libbeat/processors/actions/include_fields.go @@ -84,7 +84,7 @@ func (f *includeFields) Run(event *beat.Event) (*beat.Event, error) { event.Fields = filtered if len(errs) > 0 { - return event, fmt.Errorf(strings.Join(errs, ", ")) + return event, errors.New(strings.Join(errs, ", ")) } 
return event, nil } diff --git a/metricbeat/helper/kubernetes/ktest/ktest.go b/metricbeat/helper/kubernetes/ktest/ktest.go index 2e811069b79f..5b1a73f7dbba 100644 --- a/metricbeat/helper/kubernetes/ktest/ktest.go +++ b/metricbeat/helper/kubernetes/ktest/ktest.go @@ -103,7 +103,7 @@ func TestMetricsFamilyFromFiles(t *testing.T, files []string, mapping *p.Metrics func TestMetricsFamilyFromFolder(t *testing.T, folder string, mapping *p.MetricsMapping) { files, err := getFiles(folder) if err != nil { - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } TestMetricsFamilyFromFiles(t, files, mapping) } diff --git a/metricbeat/module/elasticsearch/elasticsearch.go b/metricbeat/module/elasticsearch/elasticsearch.go index 41f498a9de4f..446a2d128bbf 100644 --- a/metricbeat/module/elasticsearch/elasticsearch.go +++ b/metricbeat/module/elasticsearch/elasticsearch.go @@ -558,7 +558,7 @@ func (l *License) ToMapStr() mapstr.M { func getSettingGroup(allSettings mapstr.M, groupKey string) (mapstr.M, error) { hasSettingGroup, err := allSettings.HasKey(groupKey) if err != nil { - return nil, fmt.Errorf("failure to determine if "+groupKey+" settings exist: %w", err) + return nil, fmt.Errorf("failure to determine if %s settings exist: %w", groupKey, err) } if !hasSettingGroup { @@ -567,12 +567,12 @@ func getSettingGroup(allSettings mapstr.M, groupKey string) (mapstr.M, error) { settings, err := allSettings.GetValue(groupKey) if err != nil { - return nil, fmt.Errorf("failure to extract "+groupKey+" settings: %w", err) + return nil, fmt.Errorf("failure to extract %s settings: %w", groupKey, err) } v, ok := settings.(map[string]interface{}) if !ok { - return nil, fmt.Errorf(groupKey + " settings are not a map") + return nil, fmt.Errorf("%s settings are not a map", groupKey) } return mapstr.M(v), nil diff --git a/metricbeat/module/prometheus/query/data.go b/metricbeat/module/prometheus/query/data.go index 298f1efd22d9..fbd535df1379 100644 --- a/metricbeat/module/prometheus/query/data.go +++ 
b/metricbeat/module/prometheus/query/data.go @@ -19,6 +19,7 @@ package query import ( "encoding/json" + "errors" "fmt" "math" "strconv" @@ -120,8 +121,7 @@ func parseResponse(body []byte, pathConfig QueryConfig) ([]mb.Event, error) { } events = append(events, evnts...) default: - msg := fmt.Sprintf("Unknown resultType '%v'", resultType) - return events, fmt.Errorf(msg) + return events, fmt.Errorf("Unknown resultType '%v'", resultType) } return events, nil } @@ -223,8 +223,7 @@ func getEventFromScalarOrString(body []byte, resultType string, queryName string } else if resultType == "string" { value, ok := convertedArray.Data.Results[1].(string) if !ok { - msg := fmt.Sprintf("Could not parse value of result: %v", convertedArray.Data.Results) - return mb.Event{}, fmt.Errorf(msg) + return mb.Event{}, fmt.Errorf("Could not parse value of result: %v", convertedArray.Data.Results) } return mb.Event{ Timestamp: getTimestamp(timestamp), @@ -249,8 +248,7 @@ func getTimestampFromVector(vector []interface{}) (float64, error) { } timestamp, ok := vector[0].(float64) if !ok { - msg := fmt.Sprintf("Could not parse timestamp of result: %v", vector) - return 0, fmt.Errorf(msg) + return 0, fmt.Errorf("Could not parse timestamp of result: %v", vector) } return timestamp, nil } @@ -258,17 +256,15 @@ func getTimestampFromVector(vector []interface{}) (float64, error) { func getValueFromVector(vector []interface{}) (float64, error) { // Example input: [ , "" ] if len(vector) != 2 { - return 0, fmt.Errorf("could not parse results") + return 0, errors.New("could not parse results") } value, ok := vector[1].(string) if !ok { - msg := fmt.Sprintf("Could not parse value of result: %v", vector) - return 0, fmt.Errorf(msg) + return 0, fmt.Errorf("Could not parse value of result: %v", vector) } val, err := strconv.ParseFloat(value, 64) if err != nil { - msg := fmt.Sprintf("Could not parse value of result: %v", vector) - return 0, fmt.Errorf(msg) + return 0, fmt.Errorf("Could not parse value of 
result: %v", vector) } return val, nil } diff --git a/packetbeat/beater/processor.go b/packetbeat/beater/processor.go index 135f0c18ac7c..0e32723c6ee6 100644 --- a/packetbeat/beater/processor.go +++ b/packetbeat/beater/processor.go @@ -158,7 +158,7 @@ func (p *processorFactory) Create(pipeline beat.PipelineConnector, cfg *conf.C) if config.Interfaces[0].File == "" { err = watch.Init(config.Procs) if err != nil { - logp.Critical(err.Error()) + logp.Critical("%s", err.Error()) return nil, err } } else { diff --git a/packetbeat/config/agent.go b/packetbeat/config/agent.go index c5ccd589c07b..ba86116f1666 100644 --- a/packetbeat/config/agent.go +++ b/packetbeat/config/agent.go @@ -122,7 +122,7 @@ func NewAgentConfig(cfg *conf.C) (Config, error) { return config, err } - logp.Debug("agent", fmt.Sprintf("Found %d inputs", len(input.Streams))) + logp.Debug("agent", "Found %d inputs", len(input.Streams)) for _, stream := range input.Streams { if interfaceOverride, ok := stream["interface"]; ok { cfg, err := conf.NewConfigFrom(interfaceOverride) @@ -153,7 +153,7 @@ func NewAgentConfig(cfg *conf.C) (Config, error) { if !ok { return config, fmt.Errorf("invalid input type of: '%T'", rawStreamType) } - logp.Debug("agent", fmt.Sprintf("Found agent configuration for %v", streamType)) + logp.Debug("agent", "Found agent configuration for %v", streamType) cfg, err := conf.NewConfigFrom(stream) if err != nil { return config, err diff --git a/packetbeat/procs/procs_linux.go b/packetbeat/procs/procs_linux.go index cabddde3be84..8ad952a1e26e 100644 --- a/packetbeat/procs/procs_linux.go +++ b/packetbeat/procs/procs_linux.go @@ -136,14 +136,14 @@ func findSocketsOfPid(prefix string, pid int) (inodes []uint64, err error) { for _, name := range names { link, err := os.Readlink(filepath.Join(dirname, name)) if err != nil { - logp.Debug("procs", err.Error()) + logp.Debug("procs", "%s", err.Error()) continue } if strings.HasPrefix(link, "socket:[") { inode, err := 
strconv.ParseInt(link[8:len(link)-1], 10, 64) if err != nil { - logp.Debug("procs", err.Error()) + logp.Debug("procs", "%s", err.Error()) continue } diff --git a/packetbeat/protos/http/http_test.go b/packetbeat/protos/http/http_test.go index c8c7a9c73448..50bf9e0874a5 100644 --- a/packetbeat/protos/http/http_test.go +++ b/packetbeat/protos/http/http_test.go @@ -986,7 +986,7 @@ func TestHttpParser_RedactAuthorization_raw(t *testing.T) { rawMessageObscured := bytes.Index(msg, []byte("uthorization:*")) if rawMessageObscured < 0 { - t.Errorf("Obscured authorization string not found: " + string(msg[:])) + t.Error("Obscured authorization string not found: " + string(msg[:])) } } @@ -1021,7 +1021,7 @@ func TestHttpParser_RedactAuthorization_Proxy_raw(t *testing.T) { rawMessageObscured := bytes.Index(msg, []byte("uthorization:*")) if rawMessageObscured < 0 { - t.Errorf("Failed to redact proxy-authorization header: " + string(msg[:])) + t.Error("Failed to redact proxy-authorization header: " + string(msg[:])) } } From cc4f9512b71927a1ec91a680c20eab855f934d07 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 21 Oct 2024 17:24:09 +0200 Subject: [PATCH 58/90] [DOCS] Fix typo in changelog (#41315) h/t @lhirlimann --- CHANGELOG.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index ab562abbb54a..449bc30dd529 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -474,7 +474,7 @@ https://github.com/elastic/beats/compare/v8.13.2\...v8.13.3[View commits] *Metricbeat* -- Add new fields to configure the lease duration, retry and renew when using leader elector with Kubernetes autodiscover.{pull}38471[38471] +- Add new fields to configure the lease duration, retry and renew when using leader elector with Kubernetes autodiscover. 
{pull}38471[38471] [[release-notes-8.13.2]] From 7ca9893ca0f1e482a3667801f29e6239edfd0127 Mon Sep 17 00:00:00 2001 From: Vinit Chauhan Date: Mon, 21 Oct 2024 13:00:14 -0400 Subject: [PATCH 59/90] [Filebeat][udp] - Remove unnecessary multiplication (#41211) Remove unnecessary multiplication --------- Co-authored-by: Denis Co-authored-by: Kush Rana <89848966+kush-elastic@users.noreply.github.com> --- filebeat/inputsource/udp/server.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/filebeat/inputsource/udp/server.go b/filebeat/inputsource/udp/server.go index 75c74a1f3d01..3a0b429f8519 100644 --- a/filebeat/inputsource/udp/server.go +++ b/filebeat/inputsource/udp/server.go @@ -20,8 +20,6 @@ package udp import ( "net" - "github.com/dustin/go-humanize" - "github.com/elastic/beats/v7/filebeat/inputsource" "github.com/elastic/beats/v7/filebeat/inputsource/common/dgram" "github.com/elastic/elastic-agent-libs/logp" @@ -62,12 +60,13 @@ func (u *Server) createConn() (net.PacketConn, error) { if err != nil { return nil, err } - socketSize := int(u.config.ReadBuffer) * humanize.KiByte - if socketSize != 0 { + + if int(u.config.ReadBuffer) != 0 { if err := listener.SetReadBuffer(int(u.config.ReadBuffer)); err != nil { return nil, err } } + u.localaddress = listener.LocalAddr().String() return listener, err From ec92e0213d00cd50d54d5880a57f34ba4cbe0c0a Mon Sep 17 00:00:00 2001 From: niraj-elastic <124254029+niraj-elastic@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:12:45 +0530 Subject: [PATCH 60/90] [vSphere] Add id field to all the metricsets (#41097) * add id field * update changelog * fix unit test * update field descriptions * update fields.asci * fix lint * address review comments * update fields.asci --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/fields.asciidoc | 102 +++++++++++++++--- .../module/vsphere/cluster/_meta/data.json | 1 + .../module/vsphere/cluster/_meta/fields.yml | 16 +-- .../module/vsphere/cluster/cluster_test.go | 1 + 
metricbeat/module/vsphere/cluster/data.go | 1 + .../module/vsphere/cluster/data_test.go | 6 ++ .../module/vsphere/datastore/_meta/data.json | 3 +- .../module/vsphere/datastore/_meta/fields.yml | 5 +- metricbeat/module/vsphere/datastore/data.go | 1 + .../module/vsphere/datastore/data_test.go | 6 ++ .../vsphere/datastorecluster/_meta/data.json | 1 + .../vsphere/datastorecluster/_meta/fields.yml | 10 +- .../module/vsphere/datastorecluster/data.go | 1 + .../vsphere/datastorecluster/data_test.go | 8 ++ metricbeat/module/vsphere/fields.go | 2 +- .../module/vsphere/host/_meta/data.json | 1 + .../module/vsphere/host/_meta/fields.yml | 4 + metricbeat/module/vsphere/host/data.go | 1 + metricbeat/module/vsphere/host/data_test.go | 10 ++ .../module/vsphere/network/_meta/data.json | 1 + .../module/vsphere/network/_meta/fields.yml | 6 +- metricbeat/module/vsphere/network/data.go | 1 + .../module/vsphere/network/data_test.go | 6 +- .../vsphere/resourcepool/_meta/data.json | 1 + .../vsphere/resourcepool/_meta/fields.yml | 4 + .../module/vsphere/resourcepool/data.go | 1 + .../module/vsphere/resourcepool/data_test.go | 10 +- .../vsphere/virtualmachine/_meta/data.json | 1 + .../vsphere/virtualmachine/_meta/fields.yml | 16 +-- .../module/vsphere/virtualmachine/data.go | 1 + .../vsphere/virtualmachine/data_test.go | 8 ++ 32 files changed, 200 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 8eebaa311378..bac0ca314e83 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -365,6 +365,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Added Cisco Meraki module {pull}40836[40836] - Added Palo Alto Networks module {pull}40686[40686] - Restore docker.network.in.* and docker.network.out.* fields in docker module {pull}40968[40968] +- Add `id` field to all the vSphere metricsets. 
{pull}41097[41097] - Only watch metadata for ReplicaSets in metricbeat k8s module {pull}41289[41289] *Metricbeat* diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 783e19ffe971..915cfa33f0a7 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -67030,7 +67030,7 @@ Cluster information. *`vsphere.cluster.datastore.names`*:: + -- -List of all the Datastore names associated with the cluster. +List of all the datastore names associated with the cluster. type: keyword @@ -67040,7 +67040,7 @@ type: keyword *`vsphere.cluster.datastore.count`*:: + -- -Number of Datastores associated with the cluster. +Number of datastores associated with the cluster. type: long @@ -67072,7 +67072,7 @@ type: boolean *`vsphere.cluster.host.count`*:: + -- -Number of Hosts associated with the cluster. +Number of hosts associated with the cluster. type: long @@ -67082,7 +67082,17 @@ type: long *`vsphere.cluster.host.names`*:: + -- -List of all the Host names associated with the cluster. +List of all the host names associated with the cluster. + + +type: keyword + +-- + +*`vsphere.cluster.id`*:: ++ +-- +Unique cluster ID. type: keyword @@ -67103,7 +67113,7 @@ type: keyword *`vsphere.cluster.network.count`*:: + -- -Number of Networks associated with the cluster. +Number of networks associated with the cluster. type: long @@ -67113,7 +67123,7 @@ type: long *`vsphere.cluster.network.names`*:: + -- -List of all the Network names associated with the cluster. +List of all the network names associated with the cluster. type: keyword @@ -67247,6 +67257,16 @@ type: long List of all the host names. +type: keyword + +-- + +*`vsphere.datastore.id`*:: ++ +-- +Unique datastore ID. + + type: keyword -- @@ -67332,10 +67352,20 @@ Datastore Cluster +*`vsphere.datastorecluster.id`*:: ++ +-- +Unique datastore cluster ID. + + +type: keyword + +-- + *`vsphere.datastorecluster.name`*:: + -- -The Datastore Cluster name. +The datastore cluster name. 
type: keyword @@ -67369,7 +67399,7 @@ format: bytes *`vsphere.datastorecluster.datastore.names`*:: + -- -List of all the Datastore names associated with the Datastore Cluster. +List of all the datastore names associated with the datastore cluster. type: keyword @@ -67379,7 +67409,7 @@ type: keyword *`vsphere.datastorecluster.datastore.count`*:: + -- -Number of datastores in the Datastore Cluster. +Number of datastores in the datastore cluster. type: long @@ -67521,6 +67551,16 @@ format: bytes -- +*`vsphere.host.id`*:: ++ +-- +Unique host ID. + + +type: keyword + +-- + *`vsphere.host.memory.free.bytes`*:: + -- @@ -67841,6 +67881,16 @@ type: long -- +*`vsphere.network.id`*:: ++ +-- +Unique network ID. + + +type: keyword + +-- + *`vsphere.network.name`*:: + -- @@ -67854,7 +67904,7 @@ type: keyword *`vsphere.network.status`*:: + -- -General health of the Network. +General health of the network. type: keyword @@ -67951,6 +68001,16 @@ type: long -- +*`vsphere.resourcepool.id`*:: ++ +-- +Unique resource pool ID. + + +type: keyword + +-- + *`vsphere.resourcepool.memory.usage.guest.bytes`*:: @@ -68162,6 +68222,16 @@ type: keyword Hostname of the host. +type: keyword + +-- + +*`vsphere.virtualmachine.id`*:: ++ +-- +Unique virtual machine ID. + + type: keyword -- @@ -68169,7 +68239,7 @@ type: keyword *`vsphere.virtualmachine.name`*:: + -- -Virtual Machine name. +Virtual machine name. type: keyword @@ -68179,7 +68249,7 @@ type: keyword *`vsphere.virtualmachine.os`*:: + -- -Virtual Machine Operating System name. +Virtual machine Operating System name. type: keyword @@ -68219,7 +68289,7 @@ type: long *`vsphere.virtualmachine.memory.used.guest.bytes`*:: + -- -Used Memory of Guest in bytes. +Used memory of Guest in bytes. type: long @@ -68231,7 +68301,7 @@ format: bytes *`vsphere.virtualmachine.memory.used.host.bytes`*:: + -- -Used Memory of Host in bytes. +Used memory of Host in bytes. 
type: long @@ -68243,7 +68313,7 @@ format: bytes *`vsphere.virtualmachine.memory.total.guest.bytes`*:: + -- -Total Memory of Guest in bytes. +Total memory of Guest in bytes. type: long @@ -68255,7 +68325,7 @@ format: bytes *`vsphere.virtualmachine.memory.free.guest.bytes`*:: + -- -Free Memory of Guest in bytes. +Free memory of Guest in bytes. type: long diff --git a/metricbeat/module/vsphere/cluster/_meta/data.json b/metricbeat/module/vsphere/cluster/_meta/data.json index d704ceb865a9..67d0e742d152 100644 --- a/metricbeat/module/vsphere/cluster/_meta/data.json +++ b/metricbeat/module/vsphere/cluster/_meta/data.json @@ -49,6 +49,7 @@ "entity_name": "121.0.0.0" } ], + "id": "domain-c1", "name": "Cluster_1", "das_config": { "enabled": false, diff --git a/metricbeat/module/vsphere/cluster/_meta/fields.yml b/metricbeat/module/vsphere/cluster/_meta/fields.yml index 27025e928485..ce72f97fe8f1 100644 --- a/metricbeat/module/vsphere/cluster/_meta/fields.yml +++ b/metricbeat/module/vsphere/cluster/_meta/fields.yml @@ -10,11 +10,11 @@ - name: names type: keyword description: > - List of all the Datastore names associated with the cluster. + List of all the datastore names associated with the cluster. - name: count type: long description: > - Number of Datastores associated with the cluster. + Number of datastores associated with the cluster. - name: das_config type: group fields: @@ -32,11 +32,15 @@ - name: count type: long description: > - Number of Hosts associated with the cluster. + Number of hosts associated with the cluster. - name: names type: keyword description: > - List of all the Host names associated with the cluster. + List of all the host names associated with the cluster. + - name: id + type: keyword + description: > + Unique cluster ID. - name: name type: keyword description: > @@ -47,11 +51,11 @@ - name: count type: long description: > - Number of Networks associated with the cluster. + Number of networks associated with the cluster. 
- name: names type: keyword description: > - List of all the Network names associated with the cluster. + List of all the network names associated with the cluster. - name: triggered_alarms.* type: object object_type: keyword diff --git a/metricbeat/module/vsphere/cluster/cluster_test.go b/metricbeat/module/vsphere/cluster/cluster_test.go index 37e286c113da..3fa1b534c797 100644 --- a/metricbeat/module/vsphere/cluster/cluster_test.go +++ b/metricbeat/module/vsphere/cluster/cluster_test.go @@ -51,6 +51,7 @@ func TestFetchEventContents(t *testing.T) { testEvent := mapstr.M{ "name": "DC0_C0", + "id": "domain-c29", "host": mapstr.M{ "count": 3, "names": []string{"DC0_C0_H0", "DC0_C0_H1", "DC0_C0_H2"}, diff --git a/metricbeat/module/vsphere/cluster/data.go b/metricbeat/module/vsphere/cluster/data.go index a15e48377b5b..286e4dbb2d8e 100644 --- a/metricbeat/module/vsphere/cluster/data.go +++ b/metricbeat/module/vsphere/cluster/data.go @@ -25,6 +25,7 @@ import ( func (m *ClusterMetricSet) mapEvent(cl mo.ClusterComputeResource, data *metricData) mapstr.M { event := mapstr.M{ + "id": cl.Self.Value, "host": mapstr.M{ "count": len(data.assetNames.outputHostNames), "names": data.assetNames.outputHostNames, diff --git a/metricbeat/module/vsphere/cluster/data_test.go b/metricbeat/module/vsphere/cluster/data_test.go index 6736b8b33f71..2886e376f7af 100644 --- a/metricbeat/module/vsphere/cluster/data_test.go +++ b/metricbeat/module/vsphere/cluster/data_test.go @@ -33,6 +33,11 @@ func TestEventMapping(t *testing.T) { ComputeResource: mo.ComputeResource{ ManagedEntity: mo.ManagedEntity{ Name: "Cluster_0", + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{ + Value: "CS_0", + }, + }, }, }, Configuration: types.ClusterConfigInfo{ @@ -64,6 +69,7 @@ func TestEventMapping(t *testing.T) { }, }, "name": "Cluster_0", + "id": "CS_0", "host": mapstr.M{ "count": 1, "names": []string{"Host_0"}, diff --git 
a/metricbeat/module/vsphere/datastore/_meta/data.json b/metricbeat/module/vsphere/datastore/_meta/data.json index dc690b8d40e9..12f9ab59ad19 100644 --- a/metricbeat/module/vsphere/datastore/_meta/data.json +++ b/metricbeat/module/vsphere/datastore/_meta/data.json @@ -58,7 +58,8 @@ } }, "fstype": "local", - "name": "LocalDS_0" + "name": "LocalDS_0", + "id": "datastore-0" } } } \ No newline at end of file diff --git a/metricbeat/module/vsphere/datastore/_meta/fields.yml b/metricbeat/module/vsphere/datastore/_meta/fields.yml index a9b9352f67f4..681a19d505e0 100644 --- a/metricbeat/module/vsphere/datastore/_meta/fields.yml +++ b/metricbeat/module/vsphere/datastore/_meta/fields.yml @@ -51,6 +51,10 @@ type: keyword description: > List of all the host names. + - name: id + type: keyword + description: > + Unique datastore ID. - name: name type: keyword description: > @@ -82,4 +86,3 @@ description: > Rate of writing data to the datastore. format: bytes - diff --git a/metricbeat/module/vsphere/datastore/data.go b/metricbeat/module/vsphere/datastore/data.go index 1950d32a4749..0a55984c51be 100644 --- a/metricbeat/module/vsphere/datastore/data.go +++ b/metricbeat/module/vsphere/datastore/data.go @@ -26,6 +26,7 @@ import ( func (m *DataStoreMetricSet) mapEvent(ds mo.Datastore, data *metricData) mapstr.M { event := mapstr.M{ "name": ds.Summary.Name, + "id": ds.Self.Value, "fstype": ds.Summary.Type, "status": ds.OverallStatus, "host": mapstr.M{ diff --git a/metricbeat/module/vsphere/datastore/data_test.go b/metricbeat/module/vsphere/datastore/data_test.go index 23189e3e49f9..1fb3d4ef8530 100644 --- a/metricbeat/module/vsphere/datastore/data_test.go +++ b/metricbeat/module/vsphere/datastore/data_test.go @@ -37,6 +37,11 @@ func TestEventMapping(t *testing.T) { }, ManagedEntity: mo.ManagedEntity{ OverallStatus: "green", + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{ + Value: "DS_1", + }, + }, }, Host: []types.DatastoreHostMount{}, Vm: 
[]types.ManagedObjectReference{ @@ -61,6 +66,7 @@ func TestEventMapping(t *testing.T) { outputEvent := (&DataStoreMetricSet{}).mapEvent(datastoreTest, &metricDataTest) testEvent := mapstr.M{ "fstype": "local", + "id": "DS_1", "status": types.ManagedEntityStatus("green"), "name": "datastore-test", "host": mapstr.M{ diff --git a/metricbeat/module/vsphere/datastorecluster/_meta/data.json b/metricbeat/module/vsphere/datastorecluster/_meta/data.json index 10a4d2f98de8..dd44a7c435d2 100644 --- a/metricbeat/module/vsphere/datastorecluster/_meta/data.json +++ b/metricbeat/module/vsphere/datastorecluster/_meta/data.json @@ -15,6 +15,7 @@ }, "vsphere": { "datastorecluster": { + "id": "group-p1", "name": "datastore_cluster1", "capacity": { "bytes": 8795019280384 diff --git a/metricbeat/module/vsphere/datastorecluster/_meta/fields.yml b/metricbeat/module/vsphere/datastorecluster/_meta/fields.yml index 9fcceb295ad5..7edad537dbda 100644 --- a/metricbeat/module/vsphere/datastorecluster/_meta/fields.yml +++ b/metricbeat/module/vsphere/datastorecluster/_meta/fields.yml @@ -4,10 +4,14 @@ description: > Datastore Cluster fields: + - name: id + type: keyword + description: > + Unique datastore cluster ID. - name: name type: keyword description: > - The Datastore Cluster name. + The datastore cluster name. - name: capacity.bytes type: long description: > @@ -21,11 +25,11 @@ - name: datastore.names type: keyword description: > - List of all the Datastore names associated with the Datastore Cluster. + List of all the datastore names associated with the datastore cluster. - name: datastore.count type: long description: > - Number of datastores in the Datastore Cluster. + Number of datastores in the datastore cluster. 
- name: triggered_alarms.* type: object object_type: keyword diff --git a/metricbeat/module/vsphere/datastorecluster/data.go b/metricbeat/module/vsphere/datastorecluster/data.go index 3dcce129e562..1b4789aee530 100644 --- a/metricbeat/module/vsphere/datastorecluster/data.go +++ b/metricbeat/module/vsphere/datastorecluster/data.go @@ -26,6 +26,7 @@ import ( func (m *DatastoreClusterMetricSet) mapEvent(datastoreCluster mo.StoragePod, data *metricData) mapstr.M { event := mapstr.M{ "name": datastoreCluster.Name, + "id": datastoreCluster.Self.Value, "capacity": mapstr.M{ "bytes": datastoreCluster.Summary.Capacity, }, diff --git a/metricbeat/module/vsphere/datastorecluster/data_test.go b/metricbeat/module/vsphere/datastorecluster/data_test.go index 41d1736777ef..05f43ef002db 100644 --- a/metricbeat/module/vsphere/datastorecluster/data_test.go +++ b/metricbeat/module/vsphere/datastorecluster/data_test.go @@ -34,6 +34,11 @@ func TestEventMapping(t *testing.T) { Folder: mo.Folder{ ManagedEntity: mo.ManagedEntity{ Name: "Folder1", + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{ + Value: "FL_0", + }, + }, }, }, } @@ -43,6 +48,9 @@ func TestEventMapping(t *testing.T) { name, _ := event.GetValue("name") assert.Equal(t, "Folder1", name) + id, _ := event.GetValue("id") + assert.Equal(t, "FL_0", id) + capacity, _ := event.GetValue("capacity.bytes") assert.Equal(t, int64(100), capacity) diff --git a/metricbeat/module/vsphere/fields.go b/metricbeat/module/vsphere/fields.go index 7e3988627f80..c67607c61002 100644 --- a/metricbeat/module/vsphere/fields.go +++ b/metricbeat/module/vsphere/fields.go @@ -32,5 +32,5 @@ func init() { // AssetVsphere returns asset data. // This is the base64 encoded zlib format compressed contents of module/vsphere. 
func AssetVsphere() string { - return "eJzUXM1u4zgSvvdTFOay04u0HyCHBXrTmOkBNj2DTibXoEyVLW4oUktSNtxPvyApybL1Y1um1LEPOTgy66v/YrGoT/BGu3vYmDwlTR8ALLeC7uGXzZP/5pcPAAkZpnluuZL38K8PAADlfyFTSSHczzQJQkP3sMYPACtOIjH3/tFPIDGjJgn3sbvcPaxVkZffdFA5XKi5GBOFsaTr77sWdJ8a1pIsNr7vJBY+D2Fp4HKldIbukUXjgWNETVQJWjRW1UwOY+tbrbmi+2ta/61WfKPdVumk4/8D/FWf/3BjQa0AhQCbEnypwAeigMYoxtFSAltuU/9MKfZFL16mCml78Qol1+PAfiuyJWkHt4Z5AcK9fswrU3LFj1GMVxAmGTfG2QhT0molFiRxKahLKYHKUilBKMfJ4Q+ZcIaWDGxTsilpMFZzZvc4oMQB3EAJpV9dM2OtosbXz7AitIWmXpQVwlSZY3sar6zprfOrMnak68zo6g7lpV7ehNmpkG6EJ9BVwdat2kOP7FbptxuygW8B8fs3gxLoWEuwmq/XpCl5RYE6M4t/dipJLf9L7Fji4cvXKyznmJkaDZRoWjVDV3butqMB0l2LHNY+1WeoUGCYI+N2t1hposVyZ1sq7zXHE2L5TROBX9AJxwmmRnxsc6G8uYc2+RZMqyyKqDif3YrxgRaGkqg4/zaUTAQzb3lFAGkYCkpeV0Lh8QMnwP5FmpG0uCYHtgZa0wVPtwd2Hn7cXThx87ao0ceU74Mvxwrnt4b/oAhCPsRaGFzH9bDnlAAzl0UcWAfSybsWMSu0JmnFDpbE5dqVY6bIvBFdzVWu1Ya7Ii+ykX9usWPIAhqekIPmzMbBx2sYWBmHLl7l8BsXZHbGUuZX6a8eF10Zf6yk9lneLW0GqHbl8YjpLq1LuDnKtMO9YTdFTRjXKr+j9fHALew8yRkfrLTKro0QxqItIurmya93InTdWtXUxLzJJnKil8d+ihM70MvjkPtsNbdx00ZlzW7l2pqtGmPL/ZXthG2xfQh4aBEZKnXjBqLng0bV6d3jJDVLqF3rjO/9nps6d+YquQMug9JGJEpN9GpyZJHLFg/aLQ5+cVAyKuy9DU/suOf0KVsm0tcPrDBPE96Sfa+Sy0uA3UqS6OnSXbyr9g2pRsM9pPmqXUhyw7WSGUm7uG7bnRdh65WlP2LuDx/++tsp+PHrj54olBflFjoe4eDP51D2PYZ4hH2H4RTd2aJBchgNfrqfK1mX5z1Y3u/2VOkKPknr0I3bpSa04YwEWpJst8ANOQSLLNpmNSzYYNLyjIBbsPhGxtVUTGW5IEuAEp4env5wX2QokxBW8nRnOEMBAaiz44wLwQ0xJZM+E3KcVTyVvhyLo698nZKxUC4PGxQFATKtjPGG7ogb37xxaqq3f+cDj743q5Qgax8IPTJHqLFH4+YNCFkKAeJIg4pfjPfBd5Qsybouj4M/fvf0qch83HH4vMidcXsxgfYnbg3QV9R0GWVKT9WkfvSLX49uqt50LHzTtKSvRhd3X1afKA4e4r1GLgYODrAGKU9dhsjzkUxTgsjq0PFkAVLhWKJMtjyx6cJqlCZzkW+aJOFCEqCFbcpZGhofWzTQIAtJoV014pBzaUlvUCzg2W1PNeWaDElr/H9r1FW7rWLncgdoyUETI76ZVwgVzfchgckyVeUfTZ27nFWzH9JWFc46pTGavxzZG1lzYOfTeGFJqcnmsBNW0GrTmxZXTeYsUKS10nOKzfdvAtXLRViinUmSTajjpOpdLSbIULbIIajnIcwKYTlDY2dQfU1rvO/s4U6s+zbWyzTfkOvkym9hPQ9iolWeUzKD4puZoBJnRf0iqBMrvc5QV4Gcz9sH4cU++XxOCZQrcISAlFDYtCTha+O6S+HS+Ilm7u31vpuYi9zynu3UuA6et5dy2UNZls0eH9UvFe3PPlWd7Bx5w7UtUECGLOWyr
w3bP1Ia7ZS03JF+0r7/mYy5RICMkTF8KbrtqXsk+4Sk2qPYaMExZ0HJqptogO2HsawCP22UUDgl7Nxk7Gdq3e8WsaNLG7XTaTn0k6KBhCwxv5WooYfjI25MMTAS1AnxnV3P+OYPN8stnh82cjzKwK/vT/ZpBWaddr4I2jTNJyeqwc3wVOnvd5Iu91Wpr4TwbQhC3CG4511+zDn8Sov14q5C8euTRZmgTj7ewRdurObLwlLyEgJmrrT92Berb85LWlngXTrMWJS3Vh/t5wKNKjSjXCkxRcr9Xq4PjsDFQwQnxgWi+UA46G2fwkME2/o3Gs78kXxO2rMvGflgx43lzNx1HtU30SWUoewai3gf8PxxtKDseDy9ia9LKTCgmOb63Xyf5P1M/qF1Ku9kUblFxf0d2BRt8P+Xx3AvznPtwsIdoAGGghWhrFzu4Mv3p7bA4CjZcdaj09i8BWIHjDW15qfI8Tj0dce3cGwW1/EmsZp1QcZ2NssPaVwv4d8dpVIwUFgu+I8Q4Y6cqPMosGapt19+zJjfNc3Blz81nJStuSLIHNI6jCFBbHv5RIsfl1hKGWPm4r6MMqXFnB9oYllRrvkGe0ZRTvJ6Bp+OR1eSOx/oVzE3sNYoy4KxxWwoe6SSn0yKrjBL917Wxf55rIe1boHzBrv1r0pJLMluiWT72sFFktiib7DegCiq1sE/jEcdBr/Hc75EIVTv5bNYvDevAZYUIdF841/Jcalvn8eY2pBOCfvfg3CLqWEfH3+tJPQx2M6WCwFLqgcK29aDS7Uh4NaEIme/4v8Krn2gjVBmVKOns6WPvYQqhXd4V++ArFWgC2eAL4/XZRGmslyT6RsLO8n1mX6053ZPsFLknsnm/O/L41hnmu6yj2z09nRzp3+7B1wRO2s/oVtVnqwM6AJmbv2NgHirvbSS1ZLTk920Afq9K11+qybqiQrvT8JXWUu4YTTwKiTHRUf4iki+GcoGxjOjhtCyzw+PZWLvH9FVEaPmMdU/c9Lob70+hcO7gQubE1/RSme/ovWdDOnNmRAi39X6vEEucCnoJPHmsHp/YyvGyLpalf2sOJP1vc2qSFjLq4lRLinEF+zBVYV4kvVmGB9t895HBLCsMFZlryGlzJm4HzzhMpe9txsPN/USyoPz2/1dzsad7upM9LBg+ZkHuEnnGyjPwTnVO+3mUpDseLPd+9PPWJRT7SH/PNw/okwO9pBnHYpFHzDcjxaWY3v70cIeqUjMTaoiVvpypVr7LRgO3XBe+G7J4AtZ5KK244oXU7+7Yz7r9U2N2lYvQ3Ir29b/BwAA//+me5hU" + return 
"eJzUXM1y2zgSvucpuuayyZajB/Bhq7JOzThV68xU7PjqgsCWiDUIcABQKuXppwDwTyJBSRRIWzr4IFPor3/R3WjwM7zi7hY2Ok9R4QcAwwzHW/ht8+i++e0DQIKaKpYbJsUt/OcDAED5X8hkUnD7M4UcicZbWJMPACuGPNG37tHPIEiGbRL2Y3a5fVjJIi+/6aGyv1B7McoLbVDV3/ctaD81rCUa0vq+l5j/3PmlgYmVVBmxjyxaDxwiaqNKiCHayJrJYWyh1dor2r+6899qxVfcbaVKev4/wF/1+R/TBuQKCOdgUmzAe6JAtJaUEYMJbJlJ3TOl2BdBvFQWwgTxcinW48B+L7IlKgu3hnkGwkY/+oVKsWKHKMYriCQZ09raCJXCKMkXKMiSY59SPJWllByJGCeHbyJhlBjUsE3RpKhAG8WoaXBAiQOYhhJKWF0zY62ixv0XWCExhcIgygphKvWhPY1X1vTWaeGOdJ0ZXd2iPNfLK5jsEMMQviPYfgr2d1HTg29f+0nav/GIVvHdrhqgh2Yr1esVmV2J+P1bXgl0rPEZxdZrVJi8EE5Uphf/7lWSXP4f6aHE/ZcvF1jOITM1GijRdNKUvoSg344GSPctsp9uVZ+h3ISSnFBmdouVQlwsd6aj8qA5HhHL7woR3IJWOHupxKHN+YzqFrrkOzCNNIRHxflkV4wPtNCYRMX5U2MyEcy84xUepKaEY/Ky4pIcPnAE7F+oKApD1riXnNV0wdENwM79j/tzNaZfFzX6mPK9cxlgYf1Ws18YQcj7WAtN1nE97ClFIJndRSxYC9LKuxYxLZRCYfgOlsjE2maAusicEV3MVa7khtm8MrKRf+mwo9EA0SxBC82ajYVPLmFgpS26eJnD74yj3mmDmVslnLAu+nb8sZI6SC4HqPbt4xG3uyZrnC0zbOLJPLnh170auJ+iQhLXFX4Q44KQXdi6r2UaVkpml4YlbYgpIhrEo1vvSLy8tlStjXmTTeS5zw9hihN77fPDkM9uFTNx96rKmu3KtTUbOcaWO1n0hF2/xvPvOkSG0upJg968hfFT2kc7HAgnSc58kl6nNi7WMF0nCblMboAJbygjMgKF+KJzQiPnZw60XRzc4iBFVNiN30wcLE7pAXdMJNRrrTBPE1JbfWAmzgF2LRtToAN6dvvg3qZtrcMMn1pUrVgUG6akyFCYxWX9hbzwNWaW/opZCN/99dMq+OH+VyAK5UXZK4hH2PvzKZRdMyUeYddKOUb3raLBm/u5FHUdEsDyfutwqSr4KIxFN64cT3DDKHJiUNDdgmzQIlhk0apyv2CLScMyBGbAkFfUNo+jMss5GgQi4PHu8Zv9IiMi8WElT3eaUcLBA7V2nDHOmUYqRRIyIctZxVPpy7E4umfrFLWBcnnYEF4gEKqk1s7QLXHtulRWTXWdezrw6PVgpQRR+4BvBlpCrbqQ6VdAQlPwEEcaVPwCIATfUjIo6logDv74beLHInNxx+JzIrfG7cQEyp1mtkBfkNNNUDg4ww3VChlmUk3V/X9wi18gjRLdVE3/WPim6fVfjC5uHXhf9fkGT0dfIicf39sng4OUp057xOlIpkl56tPcowlPhWNJRLJliUkXRhGhMxtpp9mUbAgEYmCbMpr65s6WaGiRhaRQNvuxyJkwqDaEL+DJlsMKc4UahdHuvzXqqqVYsXO+A3TkoJAi28wrhIrm+5DAZDtj5R9tnds9smbfb5NVOOuVxmj+ckJf0eg9O5/GC0tKbTaHnbCCVpvetLhqMieBQqWkmlNsrl/kqZ4vwhLtTJJsQx0nVedqMUH6tEUMQT0NYVZwwyjRZgbV17TG+04Dd2Ldd7Gep/mWXCdXfgfraRATJfMckxkU394JKnFW1M+COrHS6x3qIpDzefsgvNinu08pgrQJDueQIuEmLUm43Ljuitht/Ejz+Pp67W3MRW5YoJwa1zF09lIuuy/Lsrnkovq5on3rk+PJzso3TJmCcMgITZkItX3
Ds7rRjoTLivSzcv3WZMyFEEIpas2WvN+e+sfrj0iqO1ZPDFjmDEhRdS810GbKzUhwY1wJ+lPJ3iKjGVa2v1vEji5d1Fan5TRVSjQkaJC6UqKG7o+rmNbFwKxVL8R3dtXmuztMLUs8f0WASiE8v64fGtIKvMHthROhTdjIrCrMeeYerHYG6++pdtw/UNjtttptT4EQd6DxaZcfcg4fcbFe3FTB7+OjISIhKvl0A1+ZNootC4PJs4/RuVTmU2h7uDrH7Gw879JHx6K8tpSsGbfUslAUcyn5FLv8j3J9sATOnpM4MhERzQf8WXZ30AAi2NZ/iWbUTR3kqBz7gqILdkwbRvVN7zRCG12CGRF9kx/vA547ceeYHV41aOPrUwoMKKa9fj/fR3k/kX/oDB5YWVRuUXF/AyYlxvv/84O/Vum4tmHhBogGSjgtfCa73MHXH49dgcHBZsdoQKexefPE9hhra83dCCCHoW+2dETthYjhA9a4Hj+Jua4L1Kb3YGCfxuWq/cNSKgUDhWGc/fKh9cB7e489a5aCZwOHjLkKcQ6+3AnppGzNFbrmkNZ+8PJia+QTLXCdYyllcJuL+zK8lRZzeoSLZUW5YhsSGPM5yusJfFoebS1gfSCsYqZhrYgoM9UOsz7fElJ81imxGWHaeFkf+6ex7te6Bs5b7Na/KiWxRLNFFN1rJGdJYktcM/kKRFG1Sf6lHWo/VD+e8yXhXAZvMMbivX2XtKQIiWIb9yqZc337NMbkBlWKJPz+jmvcGpr4+LGS0CdvO1vGOSyxHtbsWg9Zyg0CM9onOc2KfxdMuUAbIc2oxnpn2z4aCVUK7/Gu4PCxkaAKa4DPD5ftIlRmuUIdGoE7yvWJftRw2xCsFNkw2Z6tfn4Y60zTXaQSrabiXv1wvYd5EVt6b9AmK0+RBnQBM/ccR0C81iZeyWrJ6dE23gD94Ern31iKenrUaTtAHGvxt7cGXuFluegJXxHJt0NZeBR1gr7L4e4+z3HQ8wHV8CS0jBiwD6n+maMi7gL1oz8jHbiHO/HNu3T2m3c/UKPanAgh8hW8LxvCOFlyPEq8fScg3FO7SAVl2iNXZSstzgWGYJ8sEtbyxmmUuyDxBetNLLpknRnGR+uu18QDSwttZPbid7M5c4Y7R7jcRt/bxZKrem/r3pl1c0W3dVW/Ogfez5Xe30tbT8EZnvOCq1BQ35sZ359+xqKcqnz9c790JSLZK19POgiMPsfZTHCW05HNBGdAKoLkOpURiwyxkp1SD4ZDN5wWvjsy+IqGMF7bccWLrl/JMp/1un5KbavnIbmWivmfAAAA//9lhV/l" } diff --git a/metricbeat/module/vsphere/host/_meta/data.json b/metricbeat/module/vsphere/host/_meta/data.json index 0034692e95b7..d5ed7a50213b 100644 --- a/metricbeat/module/vsphere/host/_meta/data.json +++ b/metricbeat/module/vsphere/host/_meta/data.json @@ -167,6 +167,7 @@ "network_names": [ "VM Network" ], + "id": "host-0", "name": "DC0_H0", "status": "green", "uptime": 1728865 diff --git a/metricbeat/module/vsphere/host/_meta/fields.yml b/metricbeat/module/vsphere/host/_meta/fields.yml index e466ef19e2ad..98246507da7b 100644 --- a/metricbeat/module/vsphere/host/_meta/fields.yml +++ b/metricbeat/module/vsphere/host/_meta/fields.yml @@ -52,6 +52,10 @@ description: 
> Sum of disk read and write rates each second in bytes. format: bytes + - name: id + type: keyword + description: > + Unique host ID. - name: memory.free.bytes type: long description: > diff --git a/metricbeat/module/vsphere/host/data.go b/metricbeat/module/vsphere/host/data.go index d877ac78af83..252218eec131 100644 --- a/metricbeat/module/vsphere/host/data.go +++ b/metricbeat/module/vsphere/host/data.go @@ -27,6 +27,7 @@ func (m *HostMetricSet) mapEvent(hs mo.HostSystem, data *metricData) mapstr.M { const bytesMultiplier int64 = 1024 * 1024 event := mapstr.M{ "name": hs.Summary.Config.Name, + "id": hs.Self.Value, "status": hs.Summary.OverallStatus, "uptime": hs.Summary.QuickStats.Uptime, "cpu": mapstr.M{"used": mapstr.M{"mhz": hs.Summary.QuickStats.OverallCpuUsage}}, diff --git a/metricbeat/module/vsphere/host/data_test.go b/metricbeat/module/vsphere/host/data_test.go index 8116fe417aac..3de32d104364 100644 --- a/metricbeat/module/vsphere/host/data_test.go +++ b/metricbeat/module/vsphere/host/data_test.go @@ -29,6 +29,13 @@ import ( func TestEventMapping(t *testing.T) { var m *HostMetricSet HostSystemTest := mo.HostSystem{ + ManagedEntity: mo.ManagedEntity{ + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{ + Value: "ha-host", + }, + }, + }, Summary: types.HostListSummary{ Host: &types.ManagedObjectReference{Type: "HostSystem", Value: "ha-host"}, Hardware: &types.HostHardwareSummary{ @@ -81,6 +88,9 @@ func TestEventMapping(t *testing.T) { cpuTotal, _ := event.GetValue("cpu.total.mhz") assert.EqualValues(t, 4588, cpuTotal) + id, _ := event.GetValue("id") + assert.EqualValues(t, "ha-host", id) + cpuFree, _ := event.GetValue("cpu.free.mhz") assert.EqualValues(t, 4521, cpuFree) diff --git a/metricbeat/module/vsphere/network/_meta/data.json b/metricbeat/module/vsphere/network/_meta/data.json index 317fec007566..164ba4c2ae29 100644 --- a/metricbeat/module/vsphere/network/_meta/data.json +++ 
b/metricbeat/module/vsphere/network/_meta/data.json @@ -22,6 +22,7 @@ ], "count": 2 }, + "id": "network-1", "name": "VM Network", "status": "green", "accessible": true, diff --git a/metricbeat/module/vsphere/network/_meta/fields.yml b/metricbeat/module/vsphere/network/_meta/fields.yml index 66373f8b188f..2394bc7a3f4d 100644 --- a/metricbeat/module/vsphere/network/_meta/fields.yml +++ b/metricbeat/module/vsphere/network/_meta/fields.yml @@ -23,6 +23,10 @@ type: long description: > Number of hosts connected to this network. + - name: id + type: keyword + description: > + Unique network ID. - name: name type: keyword description: > @@ -30,7 +34,7 @@ - name: status type: keyword description: > - General health of the Network. + General health of the network. - name: type type: keyword description: > diff --git a/metricbeat/module/vsphere/network/data.go b/metricbeat/module/vsphere/network/data.go index 7f3605d25ab9..b640e3a9ce23 100644 --- a/metricbeat/module/vsphere/network/data.go +++ b/metricbeat/module/vsphere/network/data.go @@ -26,6 +26,7 @@ import ( func (m *NetworkMetricSet) mapEvent(net mo.Network, data *metricData) mapstr.M { event := mapstr.M{} + event.Put("id", net.Self.Value) event.Put("name", net.Name) event.Put("status", net.OverallStatus) event.Put("accessible", net.Summary.GetNetworkSummary().Accessible) diff --git a/metricbeat/module/vsphere/network/data_test.go b/metricbeat/module/vsphere/network/data_test.go index fb7c2120817a..7a9b625529cd 100644 --- a/metricbeat/module/vsphere/network/data_test.go +++ b/metricbeat/module/vsphere/network/data_test.go @@ -36,7 +36,8 @@ func TestEventMapping(t *testing.T) { ConfigStatus: "green", ExtensibleManagedObject: mo.ExtensibleManagedObject{ Self: types.ManagedObjectReference{ - Type: "Network", + Type: "Network", + Value: "NT_0", }, }, }, @@ -54,6 +55,9 @@ func TestEventMapping(t *testing.T) { name, _ := event.GetValue("name") assert.NotNil(t, name) + id, _ := event.GetValue("id") + assert.EqualValues(t, 
"NT_0", id) + status, _ := event.GetValue("status") assert.Equal(t, types.ManagedEntityStatus("green"), status) diff --git a/metricbeat/module/vsphere/resourcepool/_meta/data.json b/metricbeat/module/vsphere/resourcepool/_meta/data.json index a16e9d591979..92825ce14b7a 100644 --- a/metricbeat/module/vsphere/resourcepool/_meta/data.json +++ b/metricbeat/module/vsphere/resourcepool/_meta/data.json @@ -15,6 +15,7 @@ }, "vsphere": { "resourcepool": { + "id": "resgroup-30", "name": "Resources", "status": "green", "vm": { diff --git a/metricbeat/module/vsphere/resourcepool/_meta/fields.yml b/metricbeat/module/vsphere/resourcepool/_meta/fields.yml index 2df15df42e15..20d9676893b1 100644 --- a/metricbeat/module/vsphere/resourcepool/_meta/fields.yml +++ b/metricbeat/module/vsphere/resourcepool/_meta/fields.yml @@ -26,6 +26,10 @@ type: long description: > The static CPU resource entitlement for a virtual machine. + - name: id + type: keyword + description: > + Unique resource pool ID. - name: memory type: group fields: diff --git a/metricbeat/module/vsphere/resourcepool/data.go b/metricbeat/module/vsphere/resourcepool/data.go index 839e2669c695..76bb17cc67c2 100644 --- a/metricbeat/module/vsphere/resourcepool/data.go +++ b/metricbeat/module/vsphere/resourcepool/data.go @@ -32,6 +32,7 @@ func (m *ResourcePoolMetricSet) mapEvent(rp mo.ResourcePool, data *metricData) m event := mapstr.M{ "name": rp.Name, "status": rp.OverallStatus, + "id": rp.Self.Value, } quickStats := rp.Summary.GetResourcePoolSummary().QuickStats diff --git a/metricbeat/module/vsphere/resourcepool/data_test.go b/metricbeat/module/vsphere/resourcepool/data_test.go index ce7ccec484e1..48e85f1c2ba8 100644 --- a/metricbeat/module/vsphere/resourcepool/data_test.go +++ b/metricbeat/module/vsphere/resourcepool/data_test.go @@ -31,6 +31,11 @@ func TestEventMapping(t *testing.T) { ManagedEntity: mo.ManagedEntity{ OverallStatus: "green", Name: "resourcepool-test", + ExtensibleManagedObject: mo.ExtensibleManagedObject{ 
+ Self: types.ManagedObjectReference{ + Value: "RS_0", + }, + }, }, Summary: &types.ResourcePoolSummary{ QuickStats: &types.ResourcePoolQuickStats{ @@ -70,7 +75,10 @@ func TestEventMapping(t *testing.T) { status, _ := event.GetValue("status") assert.EqualValues(t, "green", status) - name := event["name"].(string) + id, _ := event.GetValue("id") + assert.EqualValues(t, "RS_0", id) + + name, _ := event.GetValue("name") assert.EqualValues(t, name, "resourcepool-test") cpuUsage, _ := event.GetValue("cpu.usage.mhz") diff --git a/metricbeat/module/vsphere/virtualmachine/_meta/data.json b/metricbeat/module/vsphere/virtualmachine/_meta/data.json index 8549f84040a2..57ef6e747708 100644 --- a/metricbeat/module/vsphere/virtualmachine/_meta/data.json +++ b/metricbeat/module/vsphere/virtualmachine/_meta/data.json @@ -15,6 +15,7 @@ }, "vsphere": { "virtualmachine": { + "id": "vm-51", "name": "xt0nmfpv9", "uptime": 5348978, "status": "green", diff --git a/metricbeat/module/vsphere/virtualmachine/_meta/fields.yml b/metricbeat/module/vsphere/virtualmachine/_meta/fields.yml index a83013105dc1..eb6cc6d88e52 100644 --- a/metricbeat/module/vsphere/virtualmachine/_meta/fields.yml +++ b/metricbeat/module/vsphere/virtualmachine/_meta/fields.yml @@ -15,14 +15,18 @@ type: keyword description: > Hostname of the host. + - name: id + type: keyword + description: > + Unique virtual machine ID. - name: name type: keyword description: > - Virtual Machine name. + Virtual machine name. - name: os type: keyword description: > - Virtual Machine Operating System name. + Virtual machine Operating System name. - name: cpu.used.mhz type: long description: > @@ -38,22 +42,22 @@ - name: memory.used.guest.bytes type: long description: > - Used Memory of Guest in bytes. + Used memory of Guest in bytes. format: bytes - name: memory.used.host.bytes type: long description: > - Used Memory of Host in bytes. + Used memory of Host in bytes. 
format: bytes - name: memory.total.guest.bytes type: long description: > - Total Memory of Guest in bytes. + Total memory of Guest in bytes. format: bytes - name: memory.free.guest.bytes type: long description: > - Free Memory of Guest in bytes. + Free memory of Guest in bytes. format: bytes - name: custom_fields type: object diff --git a/metricbeat/module/vsphere/virtualmachine/data.go b/metricbeat/module/vsphere/virtualmachine/data.go index a107f11a0d2a..0839f5789cca 100644 --- a/metricbeat/module/vsphere/virtualmachine/data.go +++ b/metricbeat/module/vsphere/virtualmachine/data.go @@ -33,6 +33,7 @@ func (m *MetricSet) mapEvent(data VMData) mapstr.M { event := mapstr.M{ "name": data.VM.Summary.Config.Name, + "id": data.VM.Self.Value, "os": data.VM.Summary.Config.GuestFullName, "uptime": data.VM.Summary.QuickStats.UptimeSeconds, "status": data.VM.Summary.OverallStatus, diff --git a/metricbeat/module/vsphere/virtualmachine/data_test.go b/metricbeat/module/vsphere/virtualmachine/data_test.go index 32abe001c391..b1676db8118d 100644 --- a/metricbeat/module/vsphere/virtualmachine/data_test.go +++ b/metricbeat/module/vsphere/virtualmachine/data_test.go @@ -32,6 +32,13 @@ func TestEventMapping(t *testing.T) { var m MetricSet VirtualMachineTest := mo.VirtualMachine{ + ManagedEntity: mo.ManagedEntity{ + ExtensibleManagedObject: mo.ExtensibleManagedObject{ + Self: types.ManagedObjectReference{ + Value: "VM_0", + }, + }, + }, Summary: types.VirtualMachineSummary{ OverallStatus: types.ManagedEntityStatus("green"), Config: types.VirtualMachineConfigSummary{ @@ -83,6 +90,7 @@ func TestEventMapping(t *testing.T) { expectedEvent := mapstr.M{ "name": "localhost.localdomain", "os": "otherGuest", + "id": "VM_0", "uptime": int32(10), "status": types.ManagedEntityStatus("green"), "cpu": mapstr.M{ From fdb912a8ace4245dd64076a47b174059dd656b49 Mon Sep 17 00:00:00 2001 From: Fae Charlton Date: Tue, 22 Oct 2024 08:57:34 -0400 Subject: [PATCH 61/90] Restore memory queue's internal event 
cleanup after a batch is vended (#41356) Fix https://github.com/elastic/beats/issues/41355, where event data in the memory queue was not being freed when event batches were acknowledged, but only gradually as the queue buffer was overwritten by later events. This gave the same effect as if all beat instances, even low-volume ones, were running with a full / saturated event queue. The root cause, found by @swiatekm, is [this PR](https://github.com/elastic/beats/pull/39584), an unrelated cleanup of old code that accidentally included one live call along with the deprecated ones. (There was an old `FreeEntries` hook in pipeline batches that was only used for deprecated shipper configs, but the cleanup also removed the `FreeEntries` call _inside_ the queue which was essential for releasing event memory.) --- CHANGELOG.next.asciidoc | 1 + libbeat/publisher/pipeline/ttl_batch.go | 1 + libbeat/publisher/pipeline/ttl_batch_test.go | 10 +++++ libbeat/publisher/queue/diskqueue/consumer.go | 3 ++ libbeat/publisher/queue/memqueue/broker.go | 9 +++++ .../publisher/queue/memqueue/queue_test.go | 38 +++++++++++++++++++ libbeat/publisher/queue/queue.go | 4 ++ 7 files changed, 66 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index bac0ca314e83..f601e72f5709 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -106,6 +106,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Support Elastic Agent control protocol chunking support {pull}37343[37343] - Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}[37816][37816] - Set timeout of 1 minute for FQDN requests {pull}37756[37756] +- Fix issue where old data could be saved in the memory queue after acknowledgment, increasing memory use {pull}41356[41356] *Auditbeat* diff --git a/libbeat/publisher/pipeline/ttl_batch.go b/libbeat/publisher/pipeline/ttl_batch.go index 
dab77fa56597..0ef4408b6136 100644 --- a/libbeat/publisher/pipeline/ttl_batch.go +++ b/libbeat/publisher/pipeline/ttl_batch.go @@ -77,6 +77,7 @@ func newBatch(retryer retryer, original queue.Batch, ttl int) *ttlBatch { events = append(events, event) } } + original.FreeEntries() b := &ttlBatch{ done: original.Done, diff --git a/libbeat/publisher/pipeline/ttl_batch_test.go b/libbeat/publisher/pipeline/ttl_batch_test.go index 769ccc37c35f..4c5207acbb07 100644 --- a/libbeat/publisher/pipeline/ttl_batch_test.go +++ b/libbeat/publisher/pipeline/ttl_batch_test.go @@ -112,6 +112,12 @@ func TestBatchCallsDoneAndFreesEvents(t *testing.T) { require.True(t, doneCalled, "Calling batch.Drop should invoke the done callback") } +func TestNewBatchFreesEvents(t *testing.T) { + queueBatch := &mockQueueBatch{} + _ = newBatch(nil, queueBatch, 0) + assert.Equal(t, 1, queueBatch.freeEntriesCalled, "Creating a new ttlBatch should call FreeEntries on the underlying queue.Batch") +} + type mockQueueBatch struct { freeEntriesCalled int } @@ -127,6 +133,10 @@ func (b *mockQueueBatch) Entry(i int) queue.Entry { return fmt.Sprintf("event %v", i) } +func (b *mockQueueBatch) FreeEntries() { + b.freeEntriesCalled++ +} + type mockRetryer struct { batches []*ttlBatch } diff --git a/libbeat/publisher/queue/diskqueue/consumer.go b/libbeat/publisher/queue/diskqueue/consumer.go index 20e6648d927e..a0e5e944df31 100644 --- a/libbeat/publisher/queue/diskqueue/consumer.go +++ b/libbeat/publisher/queue/diskqueue/consumer.go @@ -97,6 +97,9 @@ func (batch *diskQueueBatch) Entry(i int) queue.Entry { return batch.frames[i].event } +func (batch *diskQueueBatch) FreeEntries() { +} + func (batch *diskQueueBatch) Done() { batch.queue.acks.addFrames(batch.frames) } diff --git a/libbeat/publisher/queue/memqueue/broker.go b/libbeat/publisher/queue/memqueue/broker.go index b617bae61102..3e3e47e502cc 100644 --- a/libbeat/publisher/queue/memqueue/broker.go +++ b/libbeat/publisher/queue/memqueue/broker.go @@ -398,6 +398,15 
@@ func (b *batch) Entry(i int) queue.Entry { return b.rawEntry(i).event } +func (b *batch) FreeEntries() { + // This signals that the event data has been copied out of the batch, and is + // safe to free from the queue buffer, so set all the event pointers to nil. + for i := 0; i < b.count; i++ { + index := (b.start + i) % len(b.queue.buf) + b.queue.buf[index].event = nil + } +} + func (b *batch) Done() { b.doneChan <- batchDoneMsg{} } diff --git a/libbeat/publisher/queue/memqueue/queue_test.go b/libbeat/publisher/queue/memqueue/queue_test.go index 9cd209bbd51e..168c923e5987 100644 --- a/libbeat/publisher/queue/memqueue/queue_test.go +++ b/libbeat/publisher/queue/memqueue/queue_test.go @@ -262,3 +262,41 @@ func TestAdjustInputQueueSize(t *testing.T) { assert.Equal(t, int(float64(mainQueue)*maxInputQueueSizeRatio), AdjustInputQueueSize(mainQueue, mainQueue)) }) } + +func TestBatchFreeEntries(t *testing.T) { + const queueSize = 10 + const batchSize = 5 + // 1. Add 10 events to the queue, request two batches with 5 events each + // 2. Make sure the queue buffer has 10 non-nil events + // 3. Call FreeEntries on the second batch + // 4. Make sure only events 6-10 are nil + // 5. Call FreeEntries on the first batch + // 6. 
Make sure all events are nil + testQueue := NewQueue(nil, nil, Settings{Events: queueSize, MaxGetRequest: batchSize, FlushTimeout: time.Second}, 0, nil) + producer := testQueue.Producer(queue.ProducerConfig{}) + for i := 0; i < queueSize; i++ { + _, ok := producer.Publish(i) + require.True(t, ok, "Queue publish must succeed") + } + batch1, err := testQueue.Get(batchSize) + require.NoError(t, err, "Queue read must succeed") + require.Equal(t, batchSize, batch1.Count(), "Returned batch size must match request") + batch2, err := testQueue.Get(batchSize) + require.NoError(t, err, "Queue read must succeed") + require.Equal(t, batchSize, batch2.Count(), "Returned batch size must match request") + // Slight concurrency subtlety: we check events are non-nil after the queue + // reads, since if we do it before we have no way to be sure the insert + // has been completed. + for i := 0; i < queueSize; i++ { + require.NotNil(t, testQueue.buf[i].event, "All queue events must be non-nil") + } + batch2.FreeEntries() + for i := 0; i < batchSize; i++ { + require.NotNilf(t, testQueue.buf[i].event, "Queue index %v: batch 1's events should be unaffected by calling FreeEntries on Batch 2", i) + require.Nilf(t, testQueue.buf[batchSize+i].event, "Queue index %v: batch 2's events should be nil after FreeEntries", batchSize+i) + } + batch1.FreeEntries() + for i := 0; i < queueSize; i++ { + require.Nilf(t, testQueue.buf[i].event, "Queue index %v: all events should be nil after calling FreeEntries on both batches") + } +} diff --git a/libbeat/publisher/queue/queue.go b/libbeat/publisher/queue/queue.go index 075d7ad66a46..983a835a0699 100644 --- a/libbeat/publisher/queue/queue.go +++ b/libbeat/publisher/queue/queue.go @@ -112,6 +112,10 @@ type Batch interface { Count() int Entry(i int) Entry Done() + // Release internal references to the contained events if supported + // (the disk queue does not currently implement this). + // Entry() should not be used after this call. 
+ FreeEntries() } // Outputs can provide an EncoderFactory to enable early encoding, in which From 99d11ebfa9a2d2c6c16c556b8607738c98d3541e Mon Sep 17 00:00:00 2001 From: Leszek Kubik <39905449+intxgo@users.noreply.github.com> Date: Tue, 22 Oct 2024 16:01:13 +0200 Subject: [PATCH 62/90] fix truncated event log message (#41327) * fix truncated event log * changelog * fix warning * fix golint * playing hide an catch with CI * size in bytes * review * code review * add comment, unify code path * refactor code --- CHANGELOG.next.asciidoc | 1 + winlogbeat/eventlog/wineventlog.go | 11 +--- winlogbeat/sys/wineventlog/format_message.go | 32 +++++---- .../sys/wineventlog/wineventlog_windows.go | 66 ++++++++++++------- 4 files changed, 61 insertions(+), 49 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f601e72f5709..d582c3c36916 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -81,6 +81,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Winlogbeat* - Add "event.category" and "event.type" to Sysmon module for EventIDs 8, 9, 19, 20, 27, 28, 255 {pull}35193[35193] +- Fix truncated windows event log message {pull}41327[41327] *Functionbeat* diff --git a/winlogbeat/eventlog/wineventlog.go b/winlogbeat/eventlog/wineventlog.go index e418f22bf061..43654284218a 100644 --- a/winlogbeat/eventlog/wineventlog.go +++ b/winlogbeat/eventlog/wineventlog.go @@ -62,8 +62,7 @@ var ( const ( // renderBufferSize is the size in bytes of the buffer used to render events. - renderBufferSize = 1 << 14 - + renderBufferSize = 1 << 19 // 512KB, 256K wide characters // winEventLogApiName is the name used to identify the Windows Event Log API // as both an event type and an API. 
winEventLogAPIName = "wineventlog" @@ -448,14 +447,6 @@ func (l *winEventLog) Read() ([]Record, error) { for _, h := range handles { l.outputBuf.Reset() err := l.render(h, l.outputBuf) - var bufErr sys.InsufficientBufferError - if errors.As(err, &bufErr) { - detailf("%s Increasing render buffer size to %d", l.logPrefix, - bufErr.RequiredSize) - l.renderBuf = make([]byte, bufErr.RequiredSize) - l.outputBuf.Reset() - err = l.render(h, l.outputBuf) - } l.metrics.logError(err) if err != nil && l.outputBuf.Len() == 0 { logp.Err("%s Dropping event with rendering error. %v", l.logPrefix, err) diff --git a/winlogbeat/sys/wineventlog/format_message.go b/winlogbeat/sys/wineventlog/format_message.go index 9c1cf8254ace..4bc03166939a 100644 --- a/winlogbeat/sys/wineventlog/format_message.go +++ b/winlogbeat/sys/wineventlog/format_message.go @@ -75,39 +75,43 @@ func evtFormatMessage(metadataHandle EvtHandle, eventHandle EvtHandle, messageID valuesPtr = &values[0] } - // best guess render buffer size, 16KB, to avoid rendering message twice in most cases - const bestGuessRenderBufferSize = 1 << 14 + // best guess render buffer size, to avoid rendering message twice in most cases + const bestGuessRenderBufferSize = 1 << 19 // 512KB, 256K wide characters // EvtFormatMessage operates with WCHAR buffer, assuming the size of the buffer in characters. // https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtformatmessage - var bufferNeeded uint32 - bufferSize := uint32(bestGuessRenderBufferSize / 2) + var wcharBufferUsed uint32 + wcharBufferSize := uint32(bestGuessRenderBufferSize / 2) // Get a buffer from the pool and adjust its length. 
bb := sys.NewPooledByteBuffer() defer bb.Free() - bb.Reserve(int(bufferSize * 2)) + bb.Reserve(int(wcharBufferSize * 2)) - err := _EvtFormatMessage(metadataHandle, eventHandle, messageID, valuesCount, valuesPtr, messageFlag, bufferSize, bb.PtrAt(0), &bufferNeeded) + err := _EvtFormatMessage(metadataHandle, eventHandle, messageID, valuesCount, valuesPtr, messageFlag, wcharBufferSize, bb.PtrAt(0), &wcharBufferUsed) switch err { //nolint:errorlint // This is an errno or nil. - case nil: // OK - return sys.UTF16BytesToString(bb.Bytes()) - // Ignore some errors so it can tolerate missing or mismatched parameter values. - case windows.ERROR_EVT_UNRESOLVED_VALUE_INSERT, + case nil, // OK + windows.ERROR_EVT_UNRESOLVED_VALUE_INSERT, windows.ERROR_EVT_UNRESOLVED_PARAMETER_INSERT, windows.ERROR_EVT_MAX_INSERTS_REACHED: - return sys.UTF16BytesToString(bb.Bytes()) + // wcharBufferUsed indicates the size used internally to render the message. When called with nil buffer + // EvtFormatMessage returns ERROR_INSUFFICIENT_BUFFER, but otherwise succeeds copying only up to + // wcharBufferSize to our buffer, truncating the message if our buffer was too small. + if wcharBufferUsed <= wcharBufferSize { + return sys.UTF16BytesToString(bb.Bytes()) + } + fallthrough case windows.ERROR_INSUFFICIENT_BUFFER: - bb.Reserve(int(bufferNeeded * 2)) - bufferSize = bufferNeeded + bb.Reserve(int(wcharBufferUsed * 2)) + wcharBufferSize = wcharBufferUsed default: return "", fmt.Errorf("failed in EvtFormatMessage: %w", err) } - err = _EvtFormatMessage(metadataHandle, eventHandle, messageID, valuesCount, valuesPtr, messageFlag, bufferSize, bb.PtrAt(0), &bufferNeeded) + err = _EvtFormatMessage(metadataHandle, eventHandle, messageID, valuesCount, valuesPtr, messageFlag, wcharBufferSize, bb.PtrAt(0), &wcharBufferUsed) switch err { //nolint:errorlint // This is an errno or nil. 
case nil: // OK diff --git a/winlogbeat/sys/wineventlog/wineventlog_windows.go b/winlogbeat/sys/wineventlog/wineventlog_windows.go index 22495f6bda2e..66ab869fb24d 100644 --- a/winlogbeat/sys/wineventlog/wineventlog_windows.go +++ b/winlogbeat/sys/wineventlog/wineventlog_windows.go @@ -403,35 +403,35 @@ func FormatEventString( } var bufferPtr *byte - if renderBuf != nil { + if len(renderBuf) > 0 { bufferPtr = &renderBuf[0] } // EvtFormatMessage operates with WCHAR buffer, assuming the size of the buffer in characters. // https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtformatmessage - var bufferNeeded uint32 - bufferSize := uint32(len(renderBuf) / 2) + var wcharBufferUsed uint32 + wcharBufferSize := uint32(len(renderBuf) / 2) - err := _EvtFormatMessage(ph, eventHandle, 0, 0, nil, messageFlag, bufferSize, bufferPtr, &bufferNeeded) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // This is an errno. + err := _EvtFormatMessage(ph, eventHandle, 0, 0, nil, messageFlag, wcharBufferSize, bufferPtr, &wcharBufferUsed) + if err != nil && !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { return fmt.Errorf("failed in EvtFormatMessage: %w", err) } else if err == nil { - // Windows API returns a null terminated WCHAR C-style string in the buffer. bufferNeeded applies - // only when ERROR_INSUFFICIENT_BUFFER is returned. Luckily the UTF16ToUTF8Bytes/UTF16ToString - // functions stop at null termination. Note, as signaled in a comment at the end of this function, - // this behavior is bad for EvtFormatMessageKeyword as then the API returns a list of null terminated - // strings in the buffer (it's fine for now as we don't use this parameter value). - return common.UTF16ToUTF8Bytes(renderBuf, out) + // wcharBufferUsed indicates the size used internally to render the message. 
When called with nil buffer + // EvtFormatMessage returns ERROR_INSUFFICIENT_BUFFER, but otherwise succeeds copying only up to + // wcharBufferSize to our buffer, truncating the message if our buffer was too small. + if wcharBufferUsed <= wcharBufferSize { + return common.UTF16ToUTF8Bytes(renderBuf[:wcharBufferUsed*2], out) + } } // Get a buffer from the pool and adjust its length. bb := sys.NewPooledByteBuffer() defer bb.Free() - bb.Reserve(int(bufferNeeded * 2)) - bufferSize = bufferNeeded + bb.Reserve(int(wcharBufferUsed * 2)) + wcharBufferSize = wcharBufferUsed - err = _EvtFormatMessage(ph, eventHandle, 0, 0, nil, messageFlag, bufferSize, bb.PtrAt(0), &bufferNeeded) + err = _EvtFormatMessage(ph, eventHandle, 0, 0, nil, messageFlag, wcharBufferSize, bb.PtrAt(0), &wcharBufferUsed) if err != nil { return fmt.Errorf("failed in EvtFormatMessage: %w", err) } @@ -550,20 +550,36 @@ func evtRenderProviderName(renderBuf []byte, eventHandle EvtHandle) (string, err } func renderXML(eventHandle EvtHandle, flag EvtRenderFlag, renderBuf []byte, out io.Writer) error { - var bufferUsed, propertyCount uint32 - err := _EvtRender(0, eventHandle, flag, uint32(len(renderBuf)), - &renderBuf[0], &bufferUsed, &propertyCount) - if err == ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // This is an errno or nil. - return sys.InsufficientBufferError{Cause: err, RequiredSize: int(bufferUsed)} + var bufferUsed, bufferSize, propertyCount uint32 + var bufferPtr *byte + + bufferSize = uint32(len(renderBuf)) + if bufferSize > 0 { + bufferPtr = &renderBuf[0] } - if err != nil { + err := _EvtRender(0, eventHandle, flag, bufferSize, bufferPtr, &bufferUsed, &propertyCount) + if err != nil && !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { return err + } else if err == nil { + // bufferUsed indicates the size used internally to render the message. 
When called with nil buffer + // EvtRender returns ERROR_INSUFFICIENT_BUFFER, but otherwise succeeds copying only up to + // bufferSize to our buffer, truncating the message if our buffer was too small. + if bufferUsed <= bufferSize { + return common.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out) + } } - if int(bufferUsed) > len(renderBuf) { - return fmt.Errorf("Windows EvtRender reported that wrote %d bytes "+ - "to the buffer, but the buffer can only hold %d bytes", - bufferUsed, len(renderBuf)) + // Get a buffer from the pool and adjust its length. + bb := sys.NewPooledByteBuffer() + defer bb.Free() + + bb.Reserve(int(bufferUsed)) + bufferSize = bufferUsed + + err = _EvtRender(0, eventHandle, flag, bufferSize, bb.PtrAt(0), &bufferUsed, &propertyCount) + if err != nil { + return fmt.Errorf("failed in EvtRender: %w", err) } - return common.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out) + + return common.UTF16ToUTF8Bytes(bb.Bytes(), out) } From 23b2804d0a7bd71211a9ee43ac9eaa9bacdaf6fa Mon Sep 17 00:00:00 2001 From: Fae Charlton Date: Tue, 22 Oct 2024 10:28:08 -0400 Subject: [PATCH 63/90] Delete unused/buggy EventACKTracker helper (#41357) --- x-pack/libbeat/common/aws/acker.go | 85 ------------------------- x-pack/libbeat/common/aws/acker_test.go | 69 -------------------- 2 files changed, 154 deletions(-) delete mode 100644 x-pack/libbeat/common/aws/acker.go delete mode 100644 x-pack/libbeat/common/aws/acker_test.go diff --git a/x-pack/libbeat/common/aws/acker.go b/x-pack/libbeat/common/aws/acker.go deleted file mode 100644 index 95fbe14b7744..000000000000 --- a/x-pack/libbeat/common/aws/acker.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package aws - -import ( - "context" - "sync" - - "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/common/acker" -) - -// EventACKTracker tracks the publishing state of S3 objects. Specifically -// it tracks the number of message acknowledgements that are pending from the -// output. It can be used to wait until all ACKs have been received for one or -// more S3 objects. -type EventACKTracker struct { - sync.Mutex - PendingACKs int64 - ctx context.Context - cancel context.CancelFunc -} - -func NewEventACKTracker(ctx context.Context) *EventACKTracker { - ctx, cancel := context.WithCancel(ctx) - return &EventACKTracker{ctx: ctx, cancel: cancel} -} - -// Add increments the number of pending ACKs. -func (a *EventACKTracker) Add() { - a.Lock() - a.PendingACKs++ - a.Unlock() -} - -// ACK decrements the number of pending ACKs. -func (a *EventACKTracker) ACK() { - a.Lock() - defer a.Unlock() - - if a.PendingACKs <= 0 { - panic("misuse detected: negative ACK counter") - } - - a.PendingACKs-- - if a.PendingACKs == 0 { - a.cancel() - } -} - -// Wait waits for the number of pending ACKs to be zero. -// Wait must be called sequentially only after every expected -// `Add` calls are made. Failing to do so could reset the pendingACKs -// property to 0 and would results in Wait returning after additional -// calls to `Add` are made without a corresponding `ACK` call. -func (a *EventACKTracker) Wait() { - // If there were never any pending ACKs then cancel the context. (This can - // happen when a document contains no events or cannot be read due to an error). - a.Lock() - if a.PendingACKs == 0 { - a.cancel() - } - a.Unlock() - - // Wait. - <-a.ctx.Done() -} - -// NewEventACKHandler returns a beat ACKer that can receive callbacks when -// an event has been ACKed an output. If the event contains a private metadata -// pointing to an eventACKTracker then it will invoke the trackers ACK() method -// to decrement the number of pending ACKs. 
-func NewEventACKHandler() beat.EventListener { - return acker.ConnectionOnly( - acker.EventPrivateReporter(func(_ int, privates []interface{}) { - for _, private := range privates { - if ack, ok := private.(*EventACKTracker); ok { - ack.ACK() - } - } - }), - ) -} diff --git a/x-pack/libbeat/common/aws/acker_test.go b/x-pack/libbeat/common/aws/acker_test.go deleted file mode 100644 index 3c470f0b922b..000000000000 --- a/x-pack/libbeat/common/aws/acker_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package aws - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/beat" -) - -func TestEventACKTracker(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - acker := NewEventACKTracker(ctx) - acker.Add() - acker.ACK() - - assert.EqualValues(t, 0, acker.PendingACKs) - assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) -} - -func TestEventACKTrackerNoACKs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - acker := NewEventACKTracker(ctx) - acker.Wait() - - assert.EqualValues(t, 0, acker.PendingACKs) - assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) -} - -func TestEventACKHandler(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Create acker. Add one pending ACK. - acker := NewEventACKTracker(ctx) - acker.Add() - - // Create an ACK handler and simulate one ACKed event. 
- ackHandler := NewEventACKHandler() - ackHandler.AddEvent(beat.Event{Private: acker}, true) - ackHandler.ACKEvents(1) - - assert.EqualValues(t, 0, acker.PendingACKs) - assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) -} - -func TestEventACKHandlerWait(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Create acker. Add one pending ACK. - acker := NewEventACKTracker(ctx) - acker.Add() - acker.ACK() - acker.Wait() - acker.Add() - - assert.EqualValues(t, 1, acker.PendingACKs) - assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) -} From 15eae5f17eef6676258d43aa2db0300f3d9cf9c9 Mon Sep 17 00:00:00 2001 From: Lee E Hinman <57081003+leehinman@users.noreply.github.com> Date: Tue, 22 Oct 2024 10:29:03 -0500 Subject: [PATCH 64/90] refactor beats argument parsing (#41277) * modify beats argument parsing to not happen during init * allowlist for backwards compatible args, to conver -arg to --arg Co-authored-by: Tiago Queiroz --- NOTICE.txt | 74 +++++----- auditbeat/main_test.go | 4 + filebeat/cmd/generate.go | 7 + filebeat/cmd/root.go | 5 + filebeat/main_test.go | 4 + go.mod | 2 +- heartbeat/cmd/root.go | 2 + heartbeat/main_test.go | 4 + libbeat/cfgfile/cfgfile.go | 128 +++++++++++++----- libbeat/cmd/export/dashboard.go | 4 + libbeat/cmd/export/ilm_policy.go | 3 + libbeat/cmd/export/index_pattern.go | 2 + libbeat/cmd/export/template.go | 4 + libbeat/cmd/instance/beat.go | 6 +- libbeat/cmd/instance/beat_integration_test.go | 3 + libbeat/cmd/keystore.go | 78 ++++++----- libbeat/cmd/root.go | 23 ++-- libbeat/cmd/run.go | 5 + libbeat/cmd/setup.go | 6 + libbeat/common/fleetmode/fleet_mode.go | 39 ++---- libbeat/libbeat_test.go | 4 + metricbeat/cmd/root.go | 2 + metricbeat/main_test.go | 4 + packetbeat/cmd/root.go | 6 + packetbeat/main_test.go | 4 + winlogbeat/main_test.go | 4 + x-pack/agentbeat/main_test.go | 5 + x-pack/auditbeat/main_test.go | 4 + x-pack/filebeat/main_test.go | 4 + x-pack/functionbeat/main_test.go | 4 + 
x-pack/functionbeat/provider/aws/cmd/root.go | 4 + x-pack/functionbeat/provider/aws/main_test.go | 5 +- .../functionbeat/provider/local/main_test.go | 4 + x-pack/heartbeat/main_test.go | 4 + x-pack/libbeat/libbeat_test.go | 4 + x-pack/metricbeat/main_test.go | 4 + x-pack/osquerybeat/main_test.go | 4 + x-pack/packetbeat/main_test.go | 4 + x-pack/winlogbeat/cmd/export.go | 3 + x-pack/winlogbeat/main_test.go | 4 + 40 files changed, 338 insertions(+), 146 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index b5df79133f70..e6ebacd0ffe4 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -25581,6 +25581,43 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : golang.org/x/term +Version: v0.24.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.24.0/LICENSE: + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : golang.org/x/text Version: v0.18.0 @@ -57052,43 +57089,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : golang.org/x/term -Version: v0.24.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.24.0/LICENSE: - -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : golang.org/x/xerrors Version: v0.0.0-20231012003039-104605ab7028 diff --git a/auditbeat/main_test.go b/auditbeat/main_test.go index f91bc1f94822..8b16fe63a583 100644 --- a/auditbeat/main_test.go +++ b/auditbeat/main_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/elastic/beats/v7/auditbeat/cmd" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -34,11 +35,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/filebeat/cmd/generate.go b/filebeat/cmd/generate.go index 582038716a1e..1e5a4b1a819b 100644 --- a/filebeat/cmd/generate.go +++ b/filebeat/cmd/generate.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/filebeat/generator/fields" "github.com/elastic/beats/v7/filebeat/generator/fileset" "github.com/elastic/beats/v7/filebeat/generator/module" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/common/cli" "github.com/elastic/elastic-agent-libs/paths" ) @@ -63,7 +64,9 @@ func genGenerateModuleCmd() *cobra.Command { } genModuleCmd.Flags().String("modules-path", defaultHomePath, "Path to modules directory") + cfgfile.AddAllowedBackwardsCompatibleFlag("modules-path") genModuleCmd.Flags().String("es-beats", defaultHomePath, "Path to Elastic Beats") + cfgfile.AddAllowedBackwardsCompatibleFlag("es-beats") return genModuleCmd } @@ -88,7 +91,9 @@ func genGenerateFilesetCmd() *cobra.Command { } genFilesetCmd.Flags().String("modules-path", defaultHomePath, "Path to modules directory") + cfgfile.AddAllowedBackwardsCompatibleFlag("modules-path") genFilesetCmd.Flags().String("es-beats", defaultHomePath, "Path to Elastic Beats") + cfgfile.AddAllowedBackwardsCompatibleFlag("es-beats") return genFilesetCmd } @@ -113,7 +118,9 @@ func genGenerateFieldsCmd() *cobra.Command { } genFieldsCmd.Flags().String("es-beats", defaultHomePath, "Path to Elastic Beats") + cfgfile.AddAllowedBackwardsCompatibleFlag("es-beats") genFieldsCmd.Flags().Bool("without-documentation", false, "Do not add description fields") + cfgfile.AddAllowedBackwardsCompatibleFlag("without-documentation") return genFieldsCmd } diff --git a/filebeat/cmd/root.go b/filebeat/cmd/root.go index 2420a03efdba..48e9c9d74bce 100644 --- a/filebeat/cmd/root.go +++ b/filebeat/cmd/root.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/filebeat/fileset" 
"github.com/elastic/beats/v7/filebeat/include" "github.com/elastic/beats/v7/filebeat/input" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/cmd/instance" @@ -49,7 +50,9 @@ func FilebeatSettings(moduleNameSpace string) instance.Settings { } runFlags := pflag.NewFlagSet(Name, pflag.ExitOnError) runFlags.AddGoFlag(flag.CommandLine.Lookup("once")) + cfgfile.AddAllowedBackwardsCompatibleFlag("once") runFlags.AddGoFlag(flag.CommandLine.Lookup("modules")) + cfgfile.AddAllowedBackwardsCompatibleFlag("modules") return instance.Settings{ RunFlags: runFlags, Name: Name, @@ -66,8 +69,10 @@ func FilebeatSettings(moduleNameSpace string) instance.Settings { func Filebeat(inputs beater.PluginFactory, settings instance.Settings) *cmd.BeatsRootCmd { command := cmd.GenRootCmdWithSettings(beater.New(inputs), settings) command.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("M")) + cfgfile.AddAllowedBackwardsCompatibleFlag("M") command.TestCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("modules")) command.SetupCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("modules")) + cfgfile.AddAllowedBackwardsCompatibleFlag("modules") command.AddCommand(cmd.GenModulesCmd(Name, "", buildModulesManager)) command.AddCommand(genGenerateCmd()) return command diff --git a/filebeat/main_test.go b/filebeat/main_test.go index 0f989a77a355..b73c88438e6f 100644 --- a/filebeat/main_test.go +++ b/filebeat/main_test.go @@ -26,6 +26,7 @@ import ( fbcmd "github.com/elastic/beats/v7/filebeat/cmd" inputs "github.com/elastic/beats/v7/filebeat/input/default-inputs" + "github.com/elastic/beats/v7/libbeat/cfgfile" cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -40,11 +41,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") fbCommand = fbcmd.Filebeat(inputs.Init, fbcmd.FilebeatSettings("")) 
fbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") fbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { if err := fbCommand.Execute(); err != nil { os.Exit(1) diff --git a/go.mod b/go.mod index 75aba4602368..252fea019974 100644 --- a/go.mod +++ b/go.mod @@ -224,6 +224,7 @@ require ( go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/pdata v1.15.0 go.opentelemetry.io/collector/receiver v0.109.0 + golang.org/x/term v0.24.0 google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -382,7 +383,6 @@ require ( go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/ratelimit v0.3.1 // indirect golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 // indirect - golang.org/x/term v0.24.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/heartbeat/cmd/root.go b/heartbeat/cmd/root.go index fd95013c6fe0..a1f80a7cda71 100644 --- a/heartbeat/cmd/root.go +++ b/heartbeat/cmd/root.go @@ -22,6 +22,7 @@ import ( "github.com/elastic/beats/v7/heartbeat/beater" "github.com/elastic/beats/v7/heartbeat/include" + "github.com/elastic/beats/v7/libbeat/cfgfile" cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/ecs" @@ -81,6 +82,7 @@ func Initialize(settings instance.Settings) *cmd.BeatsRootCmd { ` setup.ResetFlags() setup.Flags().Bool(cmd.IndexManagementKey, false, "Setup all components related to 
Elasticsearch index management, including template, ilm policy and rollover alias") + cfgfile.AddAllowedBackwardsCompatibleFlag(cmd.IndexManagementKey) return rootCmd } diff --git a/heartbeat/main_test.go b/heartbeat/main_test.go index a806e1588bd0..18cc332b63af 100644 --- a/heartbeat/main_test.go +++ b/heartbeat/main_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/elastic/beats/v7/heartbeat/cmd" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -33,11 +34,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(_ *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/libbeat/cfgfile/cfgfile.go b/libbeat/cfgfile/cfgfile.go index ca19af8cb9f6..14e38c5ab7de 100644 --- a/libbeat/cfgfile/cfgfile.go +++ b/libbeat/cfgfile/cfgfile.go @@ -18,9 +18,12 @@ package cfgfile import ( + "flag" "fmt" "os" "path/filepath" + "strings" + "sync" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/fleetmode" @@ -28,39 +31,72 @@ import ( "github.com/elastic/elastic-agent-libs/logp" ) -// Command line flags. +// Evil package level globals var ( - // The default config cannot include the beat name as it is not initialized - // when this variable is created. See ChangeDefaultCfgfileFlag which should - // be called prior to flags.Parse(). 
- configfiles = config.StringArrFlag(nil, "c", "beat.yml", "Configuration file, relative to path.config") - overwrites = config.SettingFlag(nil, "E", "Configuration overwrite") - - // Additional default settings, that must be available for variable expansion - defaults = config.MustNewConfigFrom(map[string]interface{}{ - "path": map[string]interface{}{ - "home": ".", // to be initialized by beat - "config": "${path.home}", - "data": fmt.Sprint("${path.home}", string(os.PathSeparator), "data"), - "logs": fmt.Sprint("${path.home}", string(os.PathSeparator), "logs"), - }, + once sync.Once + configfiles *config.StringsFlag + overwrites *config.C + defaults *config.C + homePath *string + configPath *string + allowedBackwardsCompatibleFlags []string +) + +func Initialize() { + once.Do(func() { + // The default config cannot include the beat name as + // it is not initialized when this variable is + // created. See ChangeDefaultCfgfileFlag which should + // be called prior to flags.Parse(). + configfiles = config.StringArrFlag(nil, "c", "beat.yml", "Configuration file, relative to path.config") + AddAllowedBackwardsCompatibleFlag("c") + overwrites = config.SettingFlag(nil, "E", "Configuration overwrite") + AddAllowedBackwardsCompatibleFlag("E") + defaults = config.MustNewConfigFrom(map[string]interface{}{ + "path": map[string]interface{}{ + "home": ".", // to be initialized by beat + "config": "${path.home}", + "data": filepath.Join("${path.home}", "data"), + "logs": filepath.Join("${path.home}", "logs"), + }, + }) + homePath = config.ConfigOverwriteFlag(nil, overwrites, "path.home", "path.home", "", "Home path") + AddAllowedBackwardsCompatibleFlag("path.home") + configPath = config.ConfigOverwriteFlag(nil, overwrites, "path.config", "path.config", "", "Configuration path") + AddAllowedBackwardsCompatibleFlag("path.config") + _ = config.ConfigOverwriteFlag(nil, overwrites, "path.data", "path.data", "", "Data path") + AddAllowedBackwardsCompatibleFlag("path.data") + _ = 
config.ConfigOverwriteFlag(nil, overwrites, "path.logs", "path.logs", "", "Logs path") + AddAllowedBackwardsCompatibleFlag("path.logs") }) +} - // home-path CLI flag (initialized in init) - homePath *string - configPath *string -) +func isAllowedBackwardsCompatibleFlag(f string) bool { + for _, existing := range allowedBackwardsCompatibleFlags { + if existing == f { + return true + } + } + return false +} -func init() { - // add '-path.x' options overwriting paths in 'overwrites' config - makePathFlag := func(name, usage string) *string { - return config.ConfigOverwriteFlag(nil, overwrites, name, name, "", usage) +func AddAllowedBackwardsCompatibleFlag(f string) { + if isAllowedBackwardsCompatibleFlag(f) { + return } + allowedBackwardsCompatibleFlags = append(allowedBackwardsCompatibleFlags, f) +} - homePath = makePathFlag("path.home", "Home path") - configPath = makePathFlag("path.config", "Configuration path") - makePathFlag("path.data", "Data path") - makePathFlag("path.logs", "Logs path") +func ConvertFlagsForBackwardsCompatibility() { + // backwards compatibility workaround, convert -flags to --flags: + for i, arg := range os.Args[1:] { + if strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") { + candidate, _, _ := strings.Cut(strings.TrimPrefix(arg, "-"), "=") + if isAllowedBackwardsCompatibleFlag(candidate) { + os.Args[1+i] = "-" + arg + } + } + } } // OverrideChecker checks if a config should be overwritten. @@ -73,9 +109,11 @@ type ConditionalOverride struct { Config *config.C } -// ChangeDefaultCfgfileFlag replaces the value and default value for the `-c` -// flag so that it reflects the beat name. +// ChangeDefaultCfgfileFlag replaces the value and default value for +// the `-c` flag so that it reflects the beat name. 
It will call +// Initialize() to register the `-c` flags func ChangeDefaultCfgfileFlag(beatName string) error { + Initialize() configfiles.SetDefault(beatName + ".yml") return nil } @@ -96,8 +134,12 @@ func GetDefaultCfgfile() string { return cfg } -// HandleFlags adapts default config settings based on command line flags. +// HandleFlags adapts default config settings based on command line +// flags. This also stores if -E management.enabled=true was set on +// command line to determine if running the Beat under agent. It will +// call Initialize() to register the flags like `-E`. func HandleFlags() error { + Initialize() // default for the home path is the binary location home, err := filepath.Abs(filepath.Dir(os.Args[0])) if err != nil { @@ -114,6 +156,27 @@ func HandleFlags() error { common.PrintConfigDebugf(overwrites, "CLI setting overwrites (-E flag):") } + // Enable check to see if beat is running under Agent + // This is stored in a package so the modules which don't have + // access to the config can check this value. + type management struct { + Enabled bool `config:"management.enabled"` + } + var managementSettings management + cfgFlag := flag.Lookup("E") + if cfgFlag == nil { + fleetmode.SetAgentMode(false) + return nil + } + cfgObject, _ := cfgFlag.Value.(*config.SettingsFlag) + cliCfg := cfgObject.Config() + + err = cliCfg.Unpack(&managementSettings) + if err != nil { + fleetmode.SetAgentMode(false) + return nil //nolint:nilerr // unpacking failing isn't an error for this case + } + fleetmode.SetAgentMode(managementSettings.Enabled) return nil } @@ -220,8 +283,11 @@ func SetConfigPath(path string) { *configPath = path } -// GetPathConfig returns ${path.config}. If ${path.config} is not set, ${path.home} is returned. +// GetPathConfig returns ${path.config}. If ${path.config} is not set, +// ${path.home} is returned. It will call Initialize to ensure that +// `path.config` and `path.home` are set. 
func GetPathConfig() string { + Initialize() if *configPath != "" { return *configPath } else if *homePath != "" { diff --git a/libbeat/cmd/export/dashboard.go b/libbeat/cmd/export/dashboard.go index 7b878e00788b..4a4e13167f8c 100644 --- a/libbeat/cmd/export/dashboard.go +++ b/libbeat/cmd/export/dashboard.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/dashboards" "github.com/elastic/beats/v7/libbeat/version" @@ -101,8 +102,11 @@ func GenDashboardCmd(settings instance.Settings) *cobra.Command { } genTemplateConfigCmd.Flags().String("id", "", "Dashboard id") + cfgfile.AddAllowedBackwardsCompatibleFlag("id") genTemplateConfigCmd.Flags().String("yml", "", "Yaml file containing list of dashboard ID and filename pairs") + cfgfile.AddAllowedBackwardsCompatibleFlag("yml") genTemplateConfigCmd.Flags().String("folder", "", "Target folder to save exported assets") + cfgfile.AddAllowedBackwardsCompatibleFlag("folder") return genTemplateConfigCmd } diff --git a/libbeat/cmd/export/ilm_policy.go b/libbeat/cmd/export/ilm_policy.go index 60c97920fd79..557c62c8aaef 100644 --- a/libbeat/cmd/export/ilm_policy.go +++ b/libbeat/cmd/export/ilm_policy.go @@ -20,6 +20,7 @@ package export import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/idxmgmt" "github.com/elastic/beats/v7/libbeat/idxmgmt/lifecycle" @@ -57,7 +58,9 @@ func GenGetILMPolicyCmd(settings instance.Settings) *cobra.Command { } genTemplateConfigCmd.Flags().String("es.version", settings.Version, "Elasticsearch version") + cfgfile.AddAllowedBackwardsCompatibleFlag("es.version") genTemplateConfigCmd.Flags().String("dir", "", "Specify directory for printing policy files. 
By default policies are printed to stdout.") + cfgfile.AddAllowedBackwardsCompatibleFlag("dir") return genTemplateConfigCmd } diff --git a/libbeat/cmd/export/index_pattern.go b/libbeat/cmd/export/index_pattern.go index 6b8b8c6839f6..863fc7a72cfe 100644 --- a/libbeat/cmd/export/index_pattern.go +++ b/libbeat/cmd/export/index_pattern.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/kibana" libversion "github.com/elastic/elastic-agent-libs/version" @@ -67,6 +68,7 @@ func GenIndexPatternConfigCmd(settings instance.Settings) *cobra.Command { } genTemplateConfigCmd.Flags().String("es.version", settings.Version, "Elasticsearch version") + cfgfile.AddAllowedBackwardsCompatibleFlag("es.version") return genTemplateConfigCmd } diff --git a/libbeat/cmd/export/template.go b/libbeat/cmd/export/template.go index ffd957961efe..45a83b986817 100644 --- a/libbeat/cmd/export/template.go +++ b/libbeat/cmd/export/template.go @@ -20,6 +20,7 @@ package export import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/idxmgmt" "github.com/elastic/beats/v7/libbeat/idxmgmt/lifecycle" @@ -59,8 +60,11 @@ func GenTemplateConfigCmd(settings instance.Settings) *cobra.Command { } genTemplateConfigCmd.Flags().String("es.version", settings.Version, "Elasticsearch version") + cfgfile.AddAllowedBackwardsCompatibleFlag("es.version") genTemplateConfigCmd.Flags().Bool("noilm", false, "Generate template with ILM disabled") + cfgfile.AddAllowedBackwardsCompatibleFlag("noilm") genTemplateConfigCmd.Flags().String("dir", "", "Specify directory for printing template files. 
By default templates are printed to stdout.") + cfgfile.AddAllowedBackwardsCompatibleFlag("dir") return genTemplateConfigCmd } diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 1a6250fad4d5..23efa03b4897 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -974,9 +974,11 @@ func (b *Beat) Setup(settings Settings, bt beat.Creator, setup SetupSettings) er }()) } -// handleFlags parses the command line flags. It invokes the HandleFlags -// callback if implemented by the Beat. +// handleFlags converts -flag to --flags, parses the command line +// flags, and it invokes the HandleFlags callback if implemented by +// the Beat. func (b *Beat) handleFlags() error { + cfgfile.ConvertFlagsForBackwardsCompatibility() flag.Parse() return cfgfile.HandleFlags() } diff --git a/libbeat/cmd/instance/beat_integration_test.go b/libbeat/cmd/instance/beat_integration_test.go index baf7657665d2..1bca1400de61 100644 --- a/libbeat/cmd/instance/beat_integration_test.go +++ b/libbeat/cmd/instance/beat_integration_test.go @@ -27,6 +27,7 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/mock" "github.com/elastic/elastic-agent-libs/config" @@ -92,7 +93,9 @@ func TestMonitoringNameFromConfig(t *testing.T) { defer wg.Done() // Set the configuration file path flag so the beat can read it + cfgfile.Initialize() _ = flag.Set("c", "testdata/mockbeat.yml") + cfgfile.AddAllowedBackwardsCompatibleFlag("c") _ = instance.Run(mock.Settings, func(_ *beat.Beat, _ *config.C) (beat.Beater, error) { return &mockBeat, nil }) diff --git a/libbeat/cmd/keystore.go b/libbeat/cmd/keystore.go index 7f5ecd78c41b..cfc392020693 100644 --- a/libbeat/cmd/keystore.go +++ b/libbeat/cmd/keystore.go @@ -21,14 +21,15 @@ import ( "bufio" "errors" "fmt" - "io/ioutil" + "io" "os" "strings" "syscall" "github.com/spf13/cobra" - tml 
"golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/common/cli" "github.com/elastic/beats/v7/libbeat/common/terminal" @@ -38,7 +39,7 @@ import ( func getKeystore(settings instance.Settings) (keystore.Keystore, error) { b, err := instance.NewInitializedBeat(settings) if err != nil { - return nil, fmt.Errorf("error initializing beat: %s", err) + return nil, fmt.Errorf("error initializing beat: %w", err) } return b.Keystore(), nil @@ -74,6 +75,7 @@ func genCreateKeystoreCmd(settings instance.Settings) *cobra.Command { }), } command.Flags().BoolVar(&flagForce, "force", false, "override the existing keystore") + cfgfile.AddAllowedBackwardsCompatibleFlag("force") return command } @@ -92,7 +94,9 @@ func genAddKeystoreCmd(settings instance.Settings) *cobra.Command { }), } command.Flags().BoolVar(&flagStdin, "stdin", false, "Use the stdin as the source of the secret") + cfgfile.AddAllowedBackwardsCompatibleFlag("stdin") command.Flags().BoolVar(&flagForce, "force", false, "Override the existing key") + cfgfile.AddAllowedBackwardsCompatibleFlag("force") return command } @@ -132,27 +136,27 @@ func createKeystore(settings instance.Settings, force bool) error { writableKeystore, err := keystore.AsWritableKeystore(store) if err != nil { - return fmt.Errorf("error creating the keystore: %s", err) + return fmt.Errorf("error creating the keystore: %w", err) } - if store.IsPersisted() == true && force == false { + if store.IsPersisted() && !force { response := terminal.PromptYesNo("A keystore already exists, Overwrite?", false) - if response == true { + if response { err := writableKeystore.Create(true) if err != nil { - return fmt.Errorf("error creating the keystore: %s", err) + return fmt.Errorf("error creating the keystore: %w", err) } } else { - fmt.Println("Exiting without creating keystore.") + fmt.Printf("Exiting without creating %s 
keystore.", settings.Name) //nolint:forbidigo //needs refactor return nil } } else { err := writableKeystore.Create(true) if err != nil { - return fmt.Errorf("Error creating the keystore: %s", err) + return fmt.Errorf("Error creating the keystore: %w", err) } } - fmt.Printf("Created %s keystore\n", settings.Name) + fmt.Printf("Created %s keystore\n", settings.Name) //nolint:forbidigo //needs refactor return nil } @@ -167,32 +171,32 @@ func addKey(store keystore.Keystore, keys []string, force, stdin bool) error { writableKeystore, err := keystore.AsWritableKeystore(store) if err != nil { - return fmt.Errorf("error creating the keystore: %s", err) + return fmt.Errorf("error creating the keystore: %w", err) } - if store.IsPersisted() == false { - if force == false { + if !store.IsPersisted() { + if !force { answer := terminal.PromptYesNo("The keystore does not exist. Do you want to create it?", false) - if answer == false { + if !answer { return errors.New("exiting without creating keystore") } } err := writableKeystore.Create(true) if err != nil { - return fmt.Errorf("could not create keystore, error: %s", err) + return fmt.Errorf("could not create keystore, error: %w", err) } - fmt.Println("Created keystore") + fmt.Println("Created keystore") //nolint:forbidigo //needs refactor } key := strings.TrimSpace(keys[0]) - value, err := store.Retrieve(key) - if value != nil && force == false { - if stdin == true { + value, _ := store.Retrieve(key) + if value != nil && !force { + if stdin { return fmt.Errorf("the settings %s already exist in the keystore use `--force` to replace it", key) } answer := terminal.PromptYesNo(fmt.Sprintf("Setting %s already exists, Overwrite?", key), false) - if answer == false { - fmt.Println("Exiting without modifying keystore.") + if !answer { + fmt.Println("Exiting without modifying keystore.") //nolint:forbidigo //needs refactor return nil } } @@ -200,25 +204,25 @@ func addKey(store keystore.Keystore, keys []string, force, stdin bool) error 
{ var keyValue []byte if stdin { reader := bufio.NewReader(os.Stdin) - keyValue, err = ioutil.ReadAll(reader) + keyValue, err = io.ReadAll(reader) if err != nil { return fmt.Errorf("could not read input from stdin") } } else { - fmt.Printf("Enter value for %s: ", key) - keyValue, err = tml.ReadPassword(int(syscall.Stdin)) - fmt.Println() + fmt.Printf("Enter value for %s: ", key) //nolint:forbidigo //needs refactor + keyValue, err = term.ReadPassword(int(syscall.Stdin)) //nolint:unconvert,nolintlint //necessary on Windows + fmt.Println() //nolint:forbidigo //needs refactor if err != nil { - return fmt.Errorf("could not read value from the input, error: %s", err) + return fmt.Errorf("could not read value from the input, error: %w", err) } } if err = writableKeystore.Store(key, keyValue); err != nil { - return fmt.Errorf("could not add the key in the keystore, error: %s", err) + return fmt.Errorf("could not add the key in the keystore, error: %w", err) } if err = writableKeystore.Save(); err != nil { - return fmt.Errorf("fail to save the keystore: %s", err) + return fmt.Errorf("fail to save the keystore: %w", err) } else { - fmt.Println("Successfully updated the keystore") + fmt.Println("Successfully updated the keystore") //nolint:forbidigo //needs refactor } return nil } @@ -230,10 +234,10 @@ func removeKey(store keystore.Keystore, keys []string) error { writableKeystore, err := keystore.AsWritableKeystore(store) if err != nil { - return fmt.Errorf("error deleting the keystore: %s", err) + return fmt.Errorf("error deleting the keystore: %w", err) } - if store.IsPersisted() == false { + if !store.IsPersisted() { return errors.New("the keystore doesn't exist. 
Use the 'create' command to create one") } @@ -244,12 +248,12 @@ func removeKey(store keystore.Keystore, keys []string) error { return fmt.Errorf("could not find key '%v' in the keystore", key) } - writableKeystore.Delete(key) + _ = writableKeystore.Delete(key) err = writableKeystore.Save() if err != nil { - return fmt.Errorf("could not update the keystore with the changes, key: %s, error: %v", key, err) + return fmt.Errorf("could not update the keystore with the changes, key: %s, error: %w", key, err) } - fmt.Printf("successfully removed key: %s\n", key) + fmt.Printf("successfully removed key: %s\n", key) //nolint:forbidigo //needs refactor } return nil } @@ -257,14 +261,14 @@ func removeKey(store keystore.Keystore, keys []string) error { func list(store keystore.Keystore) error { listingKeystore, err := keystore.AsListingKeystore(store) if err != nil { - return fmt.Errorf("error listing the keystore: %s", err) + return fmt.Errorf("error listing the keystore: %w", err) } keys, err := listingKeystore.List() if err != nil { - return fmt.Errorf("could not read values from the keystore, error: %s", err) + return fmt.Errorf("could not read values from the keystore, error: %w", err) } for _, key := range keys { - fmt.Println(key) + fmt.Println(key) //nolint:forbidigo //needs refactor } return nil } diff --git a/libbeat/cmd/root.go b/libbeat/cmd/root.go index 589d706fc615..cbe2f7f8f6e5 100644 --- a/libbeat/cmd/root.go +++ b/libbeat/cmd/root.go @@ -21,7 +21,6 @@ import ( "flag" "fmt" "os" - "strings" "github.com/spf13/cobra" @@ -33,15 +32,6 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" ) -func init() { - // backwards compatibility workaround, convert -flags to --flags: - for i, arg := range os.Args[1:] { - if strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") && len(arg) > 2 { - os.Args[1+i] = "-" + arg - } - } -} - // BeatsRootCmd handles all application command line interface, parses user // flags and runs subcommands type 
BeatsRootCmd struct { @@ -76,6 +66,7 @@ func GenRootCmdWithSettings(beatCreator beat.Creator, settings instance.Settings rootCmd.Use = settings.Name // Due to a dependence upon the beat name, the default config file path + cfgfile.Initialize() err := cfgfile.ChangeDefaultCfgfileFlag(settings.Name) if err != nil { panic(fmt.Errorf("failed to set default config file path: %w", err)) @@ -96,18 +87,30 @@ func GenRootCmdWithSettings(beatCreator beat.Creator, settings instance.Settings // Persistent flags, common across all subcommands rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("E")) + cfgfile.AddAllowedBackwardsCompatibleFlag("E") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("c")) + cfgfile.AddAllowedBackwardsCompatibleFlag("c") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("d")) + cfgfile.AddAllowedBackwardsCompatibleFlag("d") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("v")) + cfgfile.AddAllowedBackwardsCompatibleFlag("v") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("e")) + cfgfile.AddAllowedBackwardsCompatibleFlag("e") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("environment")) + cfgfile.AddAllowedBackwardsCompatibleFlag("environment") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.config")) + cfgfile.AddAllowedBackwardsCompatibleFlag("path.config") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.data")) + cfgfile.AddAllowedBackwardsCompatibleFlag("path.data") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.logs")) + cfgfile.AddAllowedBackwardsCompatibleFlag("path.logs") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.home")) + cfgfile.AddAllowedBackwardsCompatibleFlag("path.home") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("strict.perms")) + cfgfile.AddAllowedBackwardsCompatibleFlag("strict.perms") if f := flag.CommandLine.Lookup("plugin"); f != nil { 
rootCmd.PersistentFlags().AddGoFlag(f) + cfgfile.AddAllowedBackwardsCompatibleFlag("plugin") } // Inherit root flags from run command diff --git a/libbeat/cmd/run.go b/libbeat/cmd/run.go index b078aadaf89c..d6cadd318cee 100644 --- a/libbeat/cmd/run.go +++ b/libbeat/cmd/run.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" ) @@ -42,9 +43,13 @@ func genRunCmd(settings instance.Settings, beatCreator beat.Creator) *cobra.Comm // Run subcommand flags, only available to *beat run runCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("N")) + cfgfile.AddAllowedBackwardsCompatibleFlag("N") runCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("httpprof")) + cfgfile.AddAllowedBackwardsCompatibleFlag("httpprof") runCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("cpuprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("cpuprofile") runCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("memprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("memprofile") if settings.RunFlags != nil { runCmd.Flags().AddFlagSet(settings.RunFlags) diff --git a/libbeat/cmd/setup.go b/libbeat/cmd/setup.go index 64d1f41fdeaf..0b28d22f96f4 100644 --- a/libbeat/cmd/setup.go +++ b/libbeat/cmd/setup.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" ) @@ -111,11 +112,16 @@ func genSetupCmd(settings instance.Settings, beatCreator beat.Creator) *cobra.Co } setup.Flags().Bool(DashboardKey, false, "Setup dashboards") + cfgfile.AddAllowedBackwardsCompatibleFlag(DashboardKey) setup.Flags().Bool(PipelineKey, false, "Setup Ingest pipelines") + cfgfile.AddAllowedBackwardsCompatibleFlag(PipelineKey) setup.Flags().Bool(IndexManagementKey, false, "Setup all components related to Elasticsearch index management, including template, ilm 
policy and rollover alias") + cfgfile.AddAllowedBackwardsCompatibleFlag(IndexManagementKey) setup.Flags().Bool("enable-all-filesets", false, "Behave as if all modules and filesets had been enabled") + cfgfile.AddAllowedBackwardsCompatibleFlag("enable-all-filesets") setup.Flags().Bool("force-enable-module-filesets", false, "Behave as if all filesets, within enabled modules, are enabled") + cfgfile.AddAllowedBackwardsCompatibleFlag("force-enable-module-filesets") return &setup } diff --git a/libbeat/common/fleetmode/fleet_mode.go b/libbeat/common/fleetmode/fleet_mode.go index af179b887eac..97a17804f647 100644 --- a/libbeat/common/fleetmode/fleet_mode.go +++ b/libbeat/common/fleetmode/fleet_mode.go @@ -17,33 +17,18 @@ package fleetmode -import ( - "flag" - - "github.com/elastic/elastic-agent-libs/config" -) +var managementEnabled bool + +// SetAgentMode stores if the Beat is running under Elastic Agent. +// Normally this is called when the command line flags are parsed. +// This is stored as a package level variable because some components +// (like filebeat/metricbeat modules) don't have access to the +// configuration information to determine this on their own. +func SetAgentMode(enabled bool) { + managementEnabled = enabled +} -// Enabled checks to see if filebeat/metricbeat is running under Agent -// The management setting is stored in the main Beat runtime object, but we can't see that from a module -// So instead we check the CLI flags, since Agent starts filebeat/metricbeat with "-E", "management.enabled=true" +// Enabled returns true if the Beat is running under Elastic Agent. 
func Enabled() bool { - type management struct { - Enabled bool `config:"management.enabled"` - } - var managementSettings management - - cfgFlag := flag.Lookup("E") - if cfgFlag == nil { - return false - } - - cfgObject, _ := cfgFlag.Value.(*config.SettingsFlag) - cliCfg := cfgObject.Config() - - err := cliCfg.Unpack(&managementSettings) - if err != nil { - return false - } - - return managementSettings.Enabled + return managementEnabled } diff --git a/libbeat/libbeat_test.go b/libbeat/libbeat_test.go index e4bac5e309de..1f9e1b3ffe8b 100644 --- a/libbeat/libbeat_test.go +++ b/libbeat/libbeat_test.go @@ -21,6 +21,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -31,11 +32,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/metricbeat/cmd/root.go b/metricbeat/cmd/root.go index e3d308d2508c..497b71bed8ad 100644 --- a/metricbeat/cmd/root.go +++ b/metricbeat/cmd/root.go @@ -24,6 +24,7 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/ecs" @@ -61,6 +62,7 @@ func MetricbeatSettings(moduleNameSpace string) instance.Settings { } var runFlags = pflag.NewFlagSet(Name, pflag.ExitOnError) runFlags.AddGoFlag(flag.CommandLine.Lookup("system.hostfs")) + 
cfgfile.AddAllowedBackwardsCompatibleFlag("system.hostfs") return instance.Settings{ RunFlags: runFlags, Name: Name, diff --git a/metricbeat/main_test.go b/metricbeat/main_test.go index 445db0353f2e..495ce5787e04 100644 --- a/metricbeat/main_test.go +++ b/metricbeat/main_test.go @@ -23,6 +23,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/metricbeat/cmd" ) @@ -33,11 +34,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/packetbeat/cmd/root.go b/packetbeat/cmd/root.go index 7b1c20b34c6e..eb6d14b1692a 100644 --- a/packetbeat/cmd/root.go +++ b/packetbeat/cmd/root.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/pflag" + "github.com/elastic/beats/v7/libbeat/cfgfile" cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/ecs" @@ -51,10 +52,15 @@ var RootCmd *cmd.BeatsRootCmd func PacketbeatSettings(globals processors.PluginConfig) instance.Settings { runFlags := pflag.NewFlagSet(Name, pflag.ExitOnError) runFlags.AddGoFlag(flag.CommandLine.Lookup("I")) + cfgfile.AddAllowedBackwardsCompatibleFlag("I") runFlags.AddGoFlag(flag.CommandLine.Lookup("t")) + cfgfile.AddAllowedBackwardsCompatibleFlag("t") runFlags.AddGoFlag(flag.CommandLine.Lookup("O")) + cfgfile.AddAllowedBackwardsCompatibleFlag("O") runFlags.AddGoFlag(flag.CommandLine.Lookup("l")) 
+ cfgfile.AddAllowedBackwardsCompatibleFlag("l") runFlags.AddGoFlag(flag.CommandLine.Lookup("dump")) + cfgfile.AddAllowedBackwardsCompatibleFlag("dump") return instance.Settings{ RunFlags: runFlags, diff --git a/packetbeat/main_test.go b/packetbeat/main_test.go index 359e70f161b0..2dfcfb4572c5 100644 --- a/packetbeat/main_test.go +++ b/packetbeat/main_test.go @@ -23,6 +23,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/packetbeat/cmd" ) @@ -33,11 +34,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/winlogbeat/main_test.go b/winlogbeat/main_test.go index 0bbc2e4e5262..002c7431f99a 100644 --- a/winlogbeat/main_test.go +++ b/winlogbeat/main_test.go @@ -22,6 +22,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/winlogbeat/cmd" ) @@ -32,12 +33,15 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // TestSystem is the function called when the test binary is started. // Only calls main. func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/agentbeat/main_test.go b/x-pack/agentbeat/main_test.go index 4201d651666e..0333a1ebcdf4 100644 --- a/x-pack/agentbeat/main_test.go +++ b/x-pack/agentbeat/main_test.go @@ -10,6 +10,8 @@ import ( "testing" "github.com/spf13/cobra" + + "github.com/elastic/beats/v7/libbeat/cfgfile" ) var ( @@ -22,11 +24,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") abCommand = AgentBeat() abCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") abCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { if err := abCommand.Execute(); err != nil { os.Exit(1) diff --git a/x-pack/auditbeat/main_test.go b/x-pack/auditbeat/main_test.go index 5d2376452712..3456231f5167 100644 --- a/x-pack/auditbeat/main_test.go +++ b/x-pack/auditbeat/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/x-pack/auditbeat/cmd" ) @@ -21,11 +22,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/filebeat/main_test.go b/x-pack/filebeat/main_test.go index 3d5424c30884..71b32b9e6e86 100644 --- a/x-pack/filebeat/main_test.go +++ b/x-pack/filebeat/main_test.go @@ -9,6 +9,7 @@ import ( "os" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/tests/system/template" fbcmd "github.com/elastic/beats/v7/x-pack/filebeat/cmd" @@ -24,11 +25,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") fbCommand = fbcmd.Filebeat() fbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") fbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { if err := fbCommand.Execute(); err != nil { os.Exit(1) diff --git a/x-pack/functionbeat/main_test.go b/x-pack/functionbeat/main_test.go index ecb5ac124355..1c21f88e2891 100644 --- a/x-pack/functionbeat/main_test.go +++ b/x-pack/functionbeat/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/x-pack/functionbeat/manager/cmd" ) @@ -21,11 +22,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/functionbeat/provider/aws/cmd/root.go b/x-pack/functionbeat/provider/aws/cmd/root.go index fc7a7e0c2515..c8e7ba17d62b 100644 --- a/x-pack/functionbeat/provider/aws/cmd/root.go +++ b/x-pack/functionbeat/provider/aws/cmd/root.go @@ -7,6 +7,7 @@ package cmd import ( "flag" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/x-pack/functionbeat/function/beater" funcmd "github.com/elastic/beats/v7/x-pack/functionbeat/function/cmd" ) @@ -20,6 +21,9 @@ var RootCmd *funcmd.FunctionCmd func init() { RootCmd = funcmd.NewFunctionCmd(Name, beater.New) RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("d")) + cfgfile.AddAllowedBackwardsCompatibleFlag("d") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("v")) + cfgfile.AddAllowedBackwardsCompatibleFlag("v") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("e")) + cfgfile.AddAllowedBackwardsCompatibleFlag("e") } diff --git a/x-pack/functionbeat/provider/aws/main_test.go b/x-pack/functionbeat/provider/aws/main_test.go index dad745420cb7..f180fd28275d 100644 --- a/x-pack/functionbeat/provider/aws/main_test.go +++ b/x-pack/functionbeat/provider/aws/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/aws/cmd" ) @@ -20,12 +21,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { - + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/functionbeat/provider/local/main_test.go b/x-pack/functionbeat/provider/local/main_test.go index cc941b40c020..7c617b300a65 100644 --- a/x-pack/functionbeat/provider/local/main_test.go +++ b/x-pack/functionbeat/provider/local/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/local/cmd" ) @@ -20,11 +21,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/heartbeat/main_test.go b/x-pack/heartbeat/main_test.go index 91fe8b60ad8c..44d5882a9905 100644 --- a/x-pack/heartbeat/main_test.go +++ b/x-pack/heartbeat/main_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/elastic/beats/v7/heartbeat/cmd" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -19,11 +20,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(_ *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/libbeat/libbeat_test.go b/x-pack/libbeat/libbeat_test.go index 338ebd7e5fbd..a3df546f3f38 100644 --- a/x-pack/libbeat/libbeat_test.go +++ b/x-pack/libbeat/libbeat_test.go @@ -8,6 +8,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -18,11 +19,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/metricbeat/main_test.go b/x-pack/metricbeat/main_test.go index b092682ccf34..e96a9932765c 100644 --- a/x-pack/metricbeat/main_test.go +++ b/x-pack/metricbeat/main_test.go @@ -8,6 +8,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/x-pack/metricbeat/cmd" ) @@ -18,11 +19,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/osquerybeat/main_test.go b/x-pack/osquerybeat/main_test.go index f9ed09ee1a8e..30a9b88efb64 100644 --- a/x-pack/osquerybeat/main_test.go +++ b/x-pack/osquerybeat/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/x-pack/osquerybeat/cmd" ) @@ -20,11 +21,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(_ *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/packetbeat/main_test.go b/x-pack/packetbeat/main_test.go index 234d68f81698..a332f5c99353 100644 --- a/x-pack/packetbeat/main_test.go +++ b/x-pack/packetbeat/main_test.go @@ -8,6 +8,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/x-pack/packetbeat/cmd" ) @@ -18,11 +19,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/winlogbeat/cmd/export.go b/x-pack/winlogbeat/cmd/export.go index 54f9f02e18c8..facd2fb0e929 100644 --- a/x-pack/winlogbeat/cmd/export.go +++ b/x-pack/winlogbeat/cmd/export.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/winlogbeat/module" libversion "github.com/elastic/elastic-agent-libs/version" @@ -48,7 +49,9 @@ func GenExportPipelineCmd(settings instance.Settings) *cobra.Command { } genExportPipelineCmd.Flags().String("es.version", settings.Version, "Elasticsearch version (required)") + cfgfile.AddAllowedBackwardsCompatibleFlag("es.version") genExportPipelineCmd.Flags().String("dir", "", "Specify directory for exporting pipelines. Default is current directory.") + cfgfile.AddAllowedBackwardsCompatibleFlag("dir") return genExportPipelineCmd } diff --git a/x-pack/winlogbeat/main_test.go b/x-pack/winlogbeat/main_test.go index 2b8547a81726..b4c0f3b124a1 100644 --- a/x-pack/winlogbeat/main_test.go +++ b/x-pack/winlogbeat/main_test.go @@ -8,6 +8,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/x-pack/winlogbeat/cmd" ) @@ -18,11 +19,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } From af4668248d381ada8ed9289e72ce4437beb86a45 Mon Sep 17 00:00:00 2001 From: Denis Date: Tue, 22 Oct 2024 18:11:29 +0200 Subject: [PATCH 65/90] Remove `DEV=true` for snapshot packaging (#41365) Packaging with `DEV=true` adds additional Go flags that sometimes lead to linker failures using the old versions of `ld.gold`. See https://github.com/elastic/beats/issues/41270 --- .buildkite/packaging.pipeline.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.buildkite/packaging.pipeline.yml b/.buildkite/packaging.pipeline.yml index 67cdfa363268..07296d3bc3cf 100644 --- a/.buildkite/packaging.pipeline.yml +++ b/.buildkite/packaging.pipeline.yml @@ -88,7 +88,8 @@ steps: env: PLATFORMS: "${PLATFORMS}" SNAPSHOT: true - DEV: true + # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270 + DEV: false command: ".buildkite/scripts/packaging/package-dra.sh {{matrix}}" agents: provider: gcp @@ -122,7 +123,8 @@ steps: PLATFORMS: "${PLATFORMS_ARM}" PACKAGES: "docker" SNAPSHOT: true - DEV: true + # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270 + DEV: false command: ".buildkite/scripts/packaging/package-dra.sh {{matrix}}" agents: provider: "aws" @@ -152,7 +154,8 @@ steps: env: PLATFORMS: "${PLATFORMS}" SNAPSHOT: true - DEV: true + # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270 + DEV: false command: ".buildkite/scripts/packaging/package-dra.sh x-pack/agentbeat" agents: provider: gcp From d2e6603d82fb044394271fb45fee564aa7dfc4e1 Mon Sep 17 00:00:00 2001 From: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> Date: Tue, 22 Oct 2024 13:05:31 -0400 Subject: [PATCH 66/90] Add note about scaling Elastic Agent with Kafka input (#41282) * Add note 
about scaling Elastic Agent with Kafka input * Update filebeat/docs/inputs/input-kafka.asciidoc Co-authored-by: Craig MacKenzie * Update filebeat/docs/inputs/input-kafka.asciidoc Co-authored-by: Craig MacKenzie * Update filebeat/docs/inputs/input-kafka.asciidoc Co-authored-by: Craig MacKenzie --------- Co-authored-by: Craig MacKenzie --- filebeat/docs/inputs/input-kafka.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/filebeat/docs/inputs/input-kafka.asciidoc b/filebeat/docs/inputs/input-kafka.asciidoc index 96836a63a385..ee0cd7842a46 100644 --- a/filebeat/docs/inputs/input-kafka.asciidoc +++ b/filebeat/docs/inputs/input-kafka.asciidoc @@ -60,6 +60,9 @@ might work as well, but are not supported. The `kafka` input supports the following configuration options plus the <<{beatname_lc}-input-{type}-common-options>> described later. +NOTE: If you're using {agent} with a Kafka input and need to increase throughput, we recommend scaling horizontally by additional {agents} to read from the Kafka topic. +Note that each {agent} reads concurrently from each of the partitions it has been assigned. + [float] [[kafka-hosts]] ===== `hosts` From e82c43cd7ac046c3645fdd42bc8e1f9026223c00 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Tue, 22 Oct 2024 18:37:34 -0400 Subject: [PATCH 67/90] system-logs input ignores folders and add tests (#41296) The system-logs input now does not count folders as an "existing file" when looking for files to decide between the journald and log inputs. Unit and integration tests are added for the system-logs input. 
--- filebeat/input/systemlogs/input.go | 21 ++- filebeat/input/systemlogs/input_linux_test.go | 54 +++++++ filebeat/input/systemlogs/input_test.go | 145 ++++++++++++++++++ filebeat/tests/integration/systemlogs_test.go | 108 +++++++++++++ .../testdata/filebeat_system_module.yml | 16 ++ 5 files changed, 340 insertions(+), 4 deletions(-) create mode 100644 filebeat/input/systemlogs/input_linux_test.go create mode 100644 filebeat/input/systemlogs/input_test.go create mode 100644 filebeat/tests/integration/systemlogs_test.go create mode 100644 filebeat/tests/integration/testdata/filebeat_system_module.yml diff --git a/filebeat/input/systemlogs/input.go b/filebeat/input/systemlogs/input.go index 7badfda760cf..12aef63700c2 100644 --- a/filebeat/input/systemlogs/input.go +++ b/filebeat/input/systemlogs/input.go @@ -20,6 +20,7 @@ package systemlogs import ( "errors" "fmt" + "os" "path/filepath" "github.com/elastic/beats/v7/filebeat/channel" @@ -145,10 +146,20 @@ func useJournald(c *conf.C) (bool, error) { if err != nil { return false, fmt.Errorf("cannot resolve glob: %w", err) } - if len(paths) != 0 { - // We found at least one system log file, - // journald will not be used, return early - logger.Info( + + for _, p := range paths { + stat, err := os.Stat(p) + if err != nil { + return false, fmt.Errorf("cannot stat '%s': %w", p, err) + } + + // Ignore directories + if stat.IsDir() { + continue + } + + // We found one file, return early + logger.Infof( "using log input because file(s) was(were) found when testing glob '%s'", g) return false, nil @@ -156,6 +167,8 @@ func useJournald(c *conf.C) (bool, error) { } // if no system log files are found, then use jounrald + logger.Info("no files were found, using journald input") + return true, nil } diff --git a/filebeat/input/systemlogs/input_linux_test.go b/filebeat/input/systemlogs/input_linux_test.go new file mode 100644 index 000000000000..251ef6cae67a --- /dev/null +++ b/filebeat/input/systemlogs/input_linux_test.go @@ -0,0 
+1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package systemlogs + +import ( + "testing" + + conf "github.com/elastic/elastic-agent-libs/config" +) + +func TestJournaldInputIsCreated(t *testing.T) { + c := map[string]any{ + "files.paths": []string{"/file/does/not/exist"}, + // The 'journald' object needs to exist for the input to be instantiated + "journald.enabled": true, + } + + cfg := conf.MustNewConfigFrom(c) + + _, inp, err := configure(cfg) + if err != nil { + t.Fatalf("did not expect an error calling newV1Input: %s", err) + } + + type namer interface { + Name() string + } + + i, isNamer := inp.(namer) + if !isNamer { + t.Fatalf("expecting an instance of *log.Input, got '%T' instead", inp) + } + + if got, expected := i.Name(), "journald"; got != expected { + t.Fatalf("expecting '%s' input, got '%s'", expected, got) + } +} diff --git a/filebeat/input/systemlogs/input_test.go b/filebeat/input/systemlogs/input_test.go new file mode 100644 index 000000000000..6e5526f17361 --- /dev/null +++ b/filebeat/input/systemlogs/input_test.go @@ -0,0 +1,145 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package systemlogs + +import ( + "os" + "testing" + + "github.com/elastic/beats/v7/filebeat/channel" + "github.com/elastic/beats/v7/filebeat/input" + "github.com/elastic/beats/v7/filebeat/input/log" + "github.com/elastic/beats/v7/libbeat/beat" + conf "github.com/elastic/elastic-agent-libs/config" +) + +func generateFile(t *testing.T) string { + // Create a known file for testing, the content is not relevant + // it just needs to exist + knwonFile, err := os.CreateTemp(t.TempDir(), t.Name()+"knwonFile*") + if err != nil { + t.Fatalf("cannot create temporary file: %s", err) + } + + if _, err := knwonFile.WriteString("Bowties are cool"); err != nil { + t.Fatalf("cannot write to temporary file '%s': %s", knwonFile.Name(), err) + } + knwonFile.Close() + + return knwonFile.Name() +} + +func TestUseJournald(t *testing.T) { + filename := generateFile(t) + + testCases := map[string]struct { + cfg map[string]any + useJournald bool + expectErr bool + }{ + "No files found": { + cfg: map[string]any{ + "files.paths": []string{"/file/does/not/exist"}, + }, + useJournald: true, + }, + "File exists": { + cfg: map[string]any{ + "files.paths": []string{filename}, + }, + useJournald: false, + }, + "use_journald is true": { + cfg: map[string]any{ + "use_journald": true, +
"journald": struct{}{}, + }, + useJournald: true, + }, + "use_files is true": { + cfg: map[string]any{ + "use_files": true, + "journald": nil, + "files": struct{}{}, + }, + useJournald: false, + }, + "use_journald and use_files are true": { + cfg: map[string]any{ + "use_files": true, + "use_journald": true, + "journald": struct{}{}, + }, + useJournald: false, + expectErr: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + cfg := conf.MustNewConfigFrom(tc.cfg) + + useJournald, err := useJournald(cfg) + if !tc.expectErr && err != nil { + t.Fatalf("did not expect an error calling 'useJournald': %s", err) + } + if tc.expectErr && err == nil { + t.Fatal("expecting an error when calling 'userJournald', got none") + } + + if useJournald != tc.useJournald { + t.Fatalf("expecting 'useJournald' to be %t, got %t", + tc.useJournald, useJournald) + } + }) + } +} + +func TestLogInputIsInstantiated(t *testing.T) { + filename := generateFile(t) + c := map[string]any{ + "files.paths": []string{filename}, + } + + cfg := conf.MustNewConfigFrom(c) + + inp, err := newV1Input(cfg, connectorMock{}, input.Context{}) + if err != nil { + t.Fatalf("did not expect an error calling newV1Input: %s", err) + } + _, isLogInput := inp.(*log.Input) + if !isLogInput { + t.Fatalf("expecting an instance of *log.Input, got '%T' instead", inp) + } +} + +type connectorMock struct{} + +func (mock connectorMock) Connect(c *conf.C) (channel.Outleter, error) { + return outleterMock{}, nil +} + +func (mock connectorMock) ConnectWith(c *conf.C, clientConfig beat.ClientConfig) (channel.Outleter, error) { + return outleterMock{}, nil +} + +type outleterMock struct{} + +func (o outleterMock) Close() error { return nil } +func (o outleterMock) Done() <-chan struct{} { return make(chan struct{}) } +func (o outleterMock) OnEvent(beat.Event) bool { return false } diff --git a/filebeat/tests/integration/systemlogs_test.go b/filebeat/tests/integration/systemlogs_test.go new file
mode 100644 index 000000000000..fa11b062d4e0 --- /dev/null +++ b/filebeat/tests/integration/systemlogs_test.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration && linux + +package integration + +import ( + _ "embed" + "fmt" + "os" + "path" + "path/filepath" + "testing" + "time" + + cp "github.com/otiai10/copy" + + "github.com/elastic/beats/v7/libbeat/tests/integration" +) + +//go:embed testdata/filebeat_system_module.yml +var systemModuleCfg string + +// TestSystemLogsCanUseJournald aims to ensure the system-logs input can +// correctly choose and start a journald input when the globs defined in +// var.paths do not resolve to any file. 
+func TestSystemLogsCanUseJournaldInput(t *testing.T) { + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + workDir := filebeat.TempDir() + copyModulesDir(t, workDir) + + // As the name says, we want this folder to exist but be empty + globWithoutFiles := filepath.Join(filebeat.TempDir(), "this-folder-does-not-exist") + yamlCfg := fmt.Sprintf(systemModuleCfg, globWithoutFiles, workDir) + + filebeat.WriteConfigFile(yamlCfg) + filebeat.Start() + + filebeat.WaitForLogs( + "no files were found, using journald input", + 10*time.Second, + "system-logs did not select journald input") + filebeat.WaitForLogs( + "journalctl started with PID", + 10*time.Second, + "system-logs did not start journald input") +} + +func TestSystemLogsCanUseLogInput(t *testing.T) { + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + workDir := filebeat.TempDir() + copyModulesDir(t, workDir) + + logFilePath := path.Join(workDir, "syslog") + integration.GenerateLogFile(t, logFilePath, 5, false) + yamlCfg := fmt.Sprintf(systemModuleCfg, logFilePath, workDir) + + filebeat.WriteConfigFile(yamlCfg) + filebeat.Start() + + filebeat.WaitForLogs( + "using log input because file(s) was(were) found", + 10*time.Second, + "system-logs did not select the log input") + filebeat.WaitForLogs( + "Harvester started for paths:", + 10*time.Second, + "system-logs did not start the log input") +} + +func copyModulesDir(t *testing.T, dst string) { + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("cannot get the current directory: %s", err) + } + localModules := filepath.Join(pwd, "../", "../", "module") + localModulesD := filepath.Join(pwd, "../", "../", "modules.d") + + if err := cp.Copy(localModules, filepath.Join(dst, "module")); err != nil { + t.Fatalf("cannot copy 'module' folder to test folder: %s", err) + } + if err := cp.Copy(localModulesD, filepath.Join(dst, "modules.d")); err != nil { + t.Fatalf("cannot copy 'modules.d' folder to test
folder: %s", err) + } +} diff --git a/filebeat/tests/integration/testdata/filebeat_system_module.yml b/filebeat/tests/integration/testdata/filebeat_system_module.yml new file mode 100644 index 000000000000..27de8f2a414a --- /dev/null +++ b/filebeat/tests/integration/testdata/filebeat_system_module.yml @@ -0,0 +1,16 @@ +filebeat.modules: + - module: system + syslog: + enabled: true + var.paths: + - "%s" + +path.home: %s + +queue.mem: + flush.timeout: 0 + +output: + file: + path: ${path.home} + filename: "output" From 6b540745cb3c990b9320bf2f1380bb1b4579a31f Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Wed, 23 Oct 2024 10:05:43 +1030 Subject: [PATCH 68/90] x-pack/filebeat/input/entityanalytics/provider/activedirectory: improve modification time and deletion logic (#41179) This improves the update time stamps of modified events by using the documents' whenChanged fields in the case of returned documents, and the current time when a document is identified as having been deleted. The latest of these is used to determine the time filter for the next Active Directory query. Documents are marked as deleted only when they are found to not exist in full sync collection, and are removed from the state store when they are identified as deleted. The change in behaviour to not use updates to identify corrects behaviour that would cause older but not deleted entities to be deleted from the index. --- CHANGELOG.next.asciidoc | 1 + .../activedirectory/activedirectory.go | 187 +++++++++--------- .../provider/activedirectory/statestore.go | 7 + 3 files changed, 103 insertions(+), 92 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index d582c3c36916..9dd77328747e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -166,6 +166,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fixed failed job handling and removed false-positive error logs in the GCS input. 
{pull}41142[41142] - Bump github.com/elastic/go-sfdc dependency used by x-pack/filebeat/input/salesforce. {pull}41192[41192] - Log bad handshake details when websocket connection fails {pull}41300[41300] +- Improve modification time handling for entities and entity deletion logic in the Active Directory entityanalytics input. {pull}41179[41179] *Heartbeat* diff --git a/x-pack/filebeat/input/entityanalytics/provider/activedirectory/activedirectory.go b/x-pack/filebeat/input/entityanalytics/provider/activedirectory/activedirectory.go index ab1a37cbced1..c66004807b4b 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/activedirectory/activedirectory.go +++ b/x-pack/filebeat/input/entityanalytics/provider/activedirectory/activedirectory.go @@ -130,6 +130,10 @@ func (p *adInput) Run(inputCtx v2.Context, store *kvstore.Store, client beat.Cli p.cfg.UserAttrs = withMandatory(p.cfg.UserAttrs, "distinguishedName", "whenChanged") p.cfg.GrpAttrs = withMandatory(p.cfg.GrpAttrs, "distinguishedName", "whenChanged") + var ( + last time.Time + err error + ) for { select { case <-inputCtx.Cancelation.Done(): @@ -137,9 +141,9 @@ func (p *adInput) Run(inputCtx v2.Context, store *kvstore.Store, client beat.Cli return inputCtx.Cancelation.Err() } return nil - case <-syncTimer.C: - start := time.Now() - if err := p.runFullSync(inputCtx, store, client); err != nil { + case start := <-syncTimer.C: + last, err = p.runFullSync(inputCtx, store, client) + if err != nil { p.logger.Errorw("Error running full sync", "error", err) p.metrics.syncError.Inc() } @@ -157,9 +161,9 @@ func (p *adInput) Run(inputCtx v2.Context, store *kvstore.Store, client beat.Cli } updateTimer.Reset(p.cfg.UpdateInterval) p.logger.Debugf("Next update expected at: %v", time.Now().Add(p.cfg.UpdateInterval)) - case <-updateTimer.C: - start := time.Now() - if err := p.runIncrementalUpdate(inputCtx, store, client); err != nil { + case start := <-updateTimer.C: + last, err = p.runIncrementalUpdate(inputCtx, store, 
last, client) + if err != nil { p.logger.Errorw("Error running incremental update", "error", err) p.metrics.updateError.Inc() } @@ -192,13 +196,13 @@ outer: // identities from Azure Active Directory, enrich users with group memberships, // and publishes all known users (regardless if they have been modified) to the // given beat.Client. -func (p *adInput) runFullSync(inputCtx v2.Context, store *kvstore.Store, client beat.Client) error { +func (p *adInput) runFullSync(inputCtx v2.Context, store *kvstore.Store, client beat.Client) (time.Time, error) { p.logger.Debugf("Running full sync...") p.logger.Debugf("Opening new transaction...") state, err := newStateStore(store) if err != nil { - return fmt.Errorf("unable to begin transaction: %w", err) + return time.Time{}, fmt.Errorf("unable to begin transaction: %w", err) } p.logger.Debugf("Transaction opened") defer func() { // If commit is successful, call to this close will be no-op. @@ -210,48 +214,89 @@ func (p *adInput) runFullSync(inputCtx v2.Context, store *kvstore.Store, client ctx := ctxtool.FromCanceller(inputCtx.Cancelation) p.logger.Debugf("Starting fetch...") - _, err = p.doFetchUsers(ctx, state, true) + users, err := p.doFetchUsers(ctx, state, true) if err != nil { - return err + return time.Time{}, err } - if len(state.users) != 0 { - tracker := kvstore.NewTxTracker(ctx) - - start := time.Now() - p.publishMarker(start, start, inputCtx.ID, true, client, tracker) - for _, u := range state.users { - p.publishUser(u, state, inputCtx.ID, client, tracker) + if len(users) != 0 || state.len() != 0 { + // Active Directory does not have a notion of deleted users + // beyond absence from the directory, so compare found users + // with users already known by the state store and if any + // are in the store but not returned in the previous fetch, + // mark them as deleted and publish the deletion. We do not + // have the time of the deletion, so use now. 
+ if state.len() != 0 { + found := make(map[string]bool) + for _, u := range users { + found[u.ID] = true + } + deleted := make(map[string]*User) + now := time.Now() + state.forEach(func(u *User) { + if u.State == Deleted { + // We have already seen that this is deleted + // so we do not need to publish again. The + // user will be deleted from the store when + // the state is closed. + return + } + if found[u.ID] { + // We have the user, so we do not need to + // mark it as deleted. + return + } + // This modifies the state store's copy since u + // is a pointer held by the state store map. + u.State = Deleted + u.WhenChanged = now + deleted[u.ID] = u + }) + for _, u := range deleted { + users = append(users, u) + } + } + if len(users) != 0 { + var tracker *kvstore.TxTracker + start := time.Now() + p.publishMarker(start, start, inputCtx.ID, true, client, tracker) + tracker = kvstore.NewTxTracker(ctx) + for _, u := range users { + p.publishUser(u, state, inputCtx.ID, client, tracker) + } + end := time.Now() + p.publishMarker(end, end, inputCtx.ID, false, client, tracker) + tracker.Wait() } - - end := time.Now() - p.publishMarker(end, end, inputCtx.ID, false, client, tracker) - - tracker.Wait() } if ctx.Err() != nil { - return ctx.Err() + return time.Time{}, ctx.Err() } - state.lastSync = time.Now() + // state.whenChanged is modified by the call to doFetchUsers to be + // the latest modification time for all of the users that have been + // collected in that call. This will not include any of the deleted + // users since they were not collected. + latest := state.whenChanged + state.lastSync = latest err = state.close(true) if err != nil { - return fmt.Errorf("unable to commit state: %w", err) + return time.Time{}, fmt.Errorf("unable to commit state: %w", err) } - return nil + return latest, nil } // runIncrementalUpdate will run an incremental update. 
The process is similar // to full synchronization, except only users which have changed (newly // discovered, modified, or deleted) will be published. -func (p *adInput) runIncrementalUpdate(inputCtx v2.Context, store *kvstore.Store, client beat.Client) error { +func (p *adInput) runIncrementalUpdate(inputCtx v2.Context, store *kvstore.Store, last time.Time, client beat.Client) (time.Time, error) { p.logger.Debugf("Running incremental update...") state, err := newStateStore(store) if err != nil { - return fmt.Errorf("unable to begin transaction: %w", err) + return last, fmt.Errorf("unable to begin transaction: %w", err) } defer func() { // If commit is successful, call to this close will be no-op. closeErr := state.close(false) @@ -263,62 +308,37 @@ func (p *adInput) runIncrementalUpdate(inputCtx v2.Context, store *kvstore.Store ctx := ctxtool.FromCanceller(inputCtx.Cancelation) updatedUsers, err := p.doFetchUsers(ctx, state, false) if err != nil { - return err + return last, err } - var tracker *kvstore.TxTracker - if len(updatedUsers) != 0 || state.len() != 0 { - // Active Directory does not have a notion of deleted users - // beyond absence from the directory, so compare found users - // with users already known by the state store and if any - // are in the store but not returned in the previous fetch, - // mark them as deleted and publish the deletion. We do not - // have the time of the deletion, so use now. - if state.len() != 0 { - found := make(map[string]bool) - for _, u := range updatedUsers { - found[u.ID] = true - } - deleted := make(map[string]*User) - now := time.Now() - state.forEach(func(u *User) { - if u.State == Deleted || found[u.ID] { - return - } - // This modifies the state store's copy since u - // is a pointer held by the state store map. 
- u.State = Deleted - u.WhenChanged = now - deleted[u.ID] = u - }) - for _, u := range deleted { - updatedUsers = append(updatedUsers, u) - } - } - if len(updatedUsers) != 0 { - tracker = kvstore.NewTxTracker(ctx) - for _, u := range updatedUsers { - p.publishUser(u, state, inputCtx.ID, client, tracker) - } - tracker.Wait() + if len(updatedUsers) != 0 { + tracker := kvstore.NewTxTracker(ctx) + for _, u := range updatedUsers { + p.publishUser(u, state, inputCtx.ID, client, tracker) } + tracker.Wait() } if ctx.Err() != nil { - return ctx.Err() + return last, ctx.Err() } - state.lastUpdate = time.Now() + // state.whenChanged is modified by the call to doFetchUsers to be + // the latest modification time for all of the users that have been + // collected in that call. + latest := state.whenChanged + state.lastUpdate = latest if err = state.close(true); err != nil { - return fmt.Errorf("unable to commit state: %w", err) + return last, fmt.Errorf("unable to commit state: %w", err) } - return nil + return latest, nil } // doFetchUsers handles fetching user identities from Active Directory. If // fullSync is true, then any existing whenChanged will be ignored, forcing a -// full synchronization from Active Directory. +// full synchronization from Active Directory. The whenChanged time of state +// is modified to be the time stamp of the latest User.WhenChanged value. // Returns a set of modified users by ID. 
func (p *adInput) doFetchUsers(ctx context.Context, state *stateStore, fullSync bool) ([]*User, error) { var since time.Time @@ -332,31 +352,14 @@ func (p *adInput) doFetchUsers(ctx context.Context, state *stateStore, fullSync return nil, err } - var ( - users []*User - whenChanged time.Time - ) - if fullSync { - for _, u := range entries { - state.storeUser(u) - if u.WhenChanged.After(whenChanged) { - whenChanged = u.WhenChanged - } + users := make([]*User, 0, len(entries)) + for _, u := range entries { + users = append(users, state.storeUser(u)) + if u.WhenChanged.After(state.whenChanged) { + state.whenChanged = u.WhenChanged } - } else { - users = make([]*User, 0, len(entries)) - for _, u := range entries { - users = append(users, state.storeUser(u)) - if u.WhenChanged.After(whenChanged) { - whenChanged = u.WhenChanged - } - } - p.logger.Debugf("processed %d users from API", len(users)) } - if whenChanged.After(state.whenChanged) { - state.whenChanged = whenChanged - } - + p.logger.Debugf("processed %d users from API", len(users)) return users, nil } diff --git a/x-pack/filebeat/input/entityanalytics/provider/activedirectory/statestore.go b/x-pack/filebeat/input/entityanalytics/provider/activedirectory/statestore.go index 74486ebaac69..c81ece21a30a 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/activedirectory/statestore.go +++ b/x-pack/filebeat/input/entityanalytics/provider/activedirectory/statestore.go @@ -170,6 +170,13 @@ func (s *stateStore) close(commit bool) (err error) { } for key, value := range s.users { + if value.State == Deleted { + err = s.tx.Delete(usersBucket, []byte(key)) + if err != nil { + return fmt.Errorf("unable to delete user %q from state: %w", key, err) + } + continue + } err = s.tx.Set(usersBucket, []byte(key), value) if err != nil { return fmt.Errorf("unable to save user %q to state: %w", key, err) From 1aa9ef69d3d93330605287a2f6fb7c83e1d99a5f Mon Sep 17 00:00:00 2001 From: "mergify[bot]" 
<37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 10:46:02 +0200 Subject: [PATCH 69/90] [main](backport #41379) Document 8.15 memory usage regression as a known issue. (#41380) * Document memory usage regression as a known issue. (#41379) (cherry picked from commit f8bd07d239e18ad9e50488116565e9a2cca19137) # Conflicts: # CHANGELOG.asciidoc * Fix conflicts --------- Co-authored-by: Craig MacKenzie --- CHANGELOG.asciidoc | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 449bc30dd529..0de208bcee96 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -7,6 +7,12 @@ === Beats version 8.15.3 https://github.com/elastic/beats/compare/v8.15.2\...v8.15.3[View commits] +==== Known issues + +*Affecting all Beats* + +- Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. {issue}41355[41355] + ==== Breaking changes *Filebeat* @@ -54,6 +60,12 @@ https://github.com/elastic/beats/compare/v8.15.0\...v8.15.2[View commits] - Beats Docker images do not log to stderr by default. The workaround is to pass the CLI flag `-e` or to set `logging.to_stderr: true` in the configuration file. {issue}41118[41118] +==== Known issues + +*Affecting all Beats* + +- Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. {issue}41355[41355] + ==== Bugfixes *Affecting all Beats* @@ -72,7 +84,9 @@ https://github.com/elastic/beats/compare/v8.15.0\...v8.15.1[View commits] *Affecting all Beats* -- Beats Docker images do not log to stderr by default. The workaround is to pass the CLI flag `-e` or to set `logging.to_stderr: true` in the configuration file. {issue}41118[41118] +- Beats Docker images do not log to stderr by default. 
The workaround is to pass the CLI flag `-e` or to set `logging.to_stderr: true` in the configuration file. +- Beats stop publishing data after a network error unless restarted. Avoid upgrading to 8.15.1. Affected Beats log `Get \"https://${ELASTICSEARCH_HOST}:443\": context canceled` repeatedly. {issue}40705{40705} +- Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. {issue}41355[41355] ==== Bugfixes @@ -129,6 +143,7 @@ https://github.com/elastic/beats/compare/v8.14.3\...v8.15.0[View commits] *Filebeat* - The Azure EventHub input in Filebeat is not found when running on Windows. Please refrain from upgrading to 8.15. See {issue}40608[40608] for details. +- Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. {issue}41355[41355] ==== Breaking changes From d2796da076f90a445d229599c3e878232637c21d Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 23 Oct 2024 09:03:04 -0400 Subject: [PATCH 70/90] Setprocess.name on syslog journald (#41354) This PR adds the missing process.name field to System module, Syslog fileset --- filebeat/module/system/syslog/ingest/journald.yml | 6 +++++- .../system/syslog/test/debian-12.journal-expected.json | 3 +++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/filebeat/module/system/syslog/ingest/journald.yml b/filebeat/module/system/syslog/ingest/journald.yml index 68400c8f5071..30952e96aea8 100644 --- a/filebeat/module/system/syslog/ingest/journald.yml +++ b/filebeat/module/system/syslog/ingest/journald.yml @@ -5,7 +5,11 @@ processors: copy_from: _ingest.timestamp - set: field: "process.pid" - value: '{{ journald.pid }}' + value: "{{ journald.pid }}" +- set: + field: "process.name" + value: "{{ journald.process.name }}" + ignore_failure: true - set: field: event.kind value: event diff --git 
a/filebeat/module/system/syslog/test/debian-12.journal-expected.json b/filebeat/module/system/syslog/test/debian-12.journal-expected.json index 3e9b606be267..294550fb2db3 100644 --- a/filebeat/module/system/syslog/test/debian-12.journal-expected.json +++ b/filebeat/module/system/syslog/test/debian-12.journal-expected.json @@ -16,6 +16,7 @@ ], "process.args_count": 1, "process.command_line": "/sbin/init", + "process.name": "systemd", "process.pid": "1", "related.hosts": [ "vagrant-debian-12" @@ -36,6 +37,7 @@ "log.syslog.facility.code": 0, "log.syslog.priority": 6, "message": "Console: switching to colour frame buffer device 160x50", + "process.name": "", "process.pid": "", "related.hosts": [ "vagrant-debian-12" @@ -54,6 +56,7 @@ "log.syslog.facility.code": 0, "log.syslog.priority": 6, "message": "thermal_sys: Registered thermal governor 'power_allocator'", + "process.name": "", "process.pid": "", "related.hosts": [ "bookworm" From 77e7d804121e7ab2090540730ec4dca4c0428485 Mon Sep 17 00:00:00 2001 From: zero <164434719@qq.com> Date: Wed, 23 Oct 2024 23:52:46 +0800 Subject: [PATCH 71/90] [Enhancement] add output kafka support for zstd (#40880) * add output kafka support for zstd * add docs * add docs * revert go mod * add more comments * add kafka_integration_test * modify test * modify test * modify test * modify test * add test * fixes for linter * change comment * change version * change version --------- Co-authored-by: weiye.gong Co-authored-by: Lee E. 
Hinman Co-authored-by: Pierre HILBERT --- CHANGELOG.asciidoc | 1 + CHANGELOG.next.asciidoc | 1 + libbeat/outputs/kafka/config.go | 6 ++++ libbeat/outputs/kafka/docs/kafka.asciidoc | 2 +- .../outputs/kafka/kafka_integration_test.go | 28 ++++++++++++++++--- 5 files changed, 33 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 0de208bcee96..6b5731b98d39 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -44,6 +44,7 @@ https://github.com/elastic/beats/compare/v8.15.2\...v8.15.3[View commits] *Affecting all Beats* - Update Go version to 1.22.8. {pull}41139[41139] +- Add kafka compression support for ZSTD. *Metricbeat* diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9dd77328747e..26962b3fd635 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -49,6 +49,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - The performance of ingesting SQS data with the S3 input has improved by up to 60x for queues with many small events. `max_number_of_messages` config for SQS mode is now ignored, as the new design no longer needs a manual cap on messages. Instead, use `number_of_workers` to scale ingestion rate in both S3 and SQS modes. The increased efficiency may increase network bandwidth consumption, which can be throttled by lowering `number_of_workers`. It may also increase number of events stored in memory, which can be throttled by lowering the configured size of the internal queue. {pull}40699[40699] - System module events now contain `input.type: systemlogs` instead of `input.type: log` when harvesting log files. {pull}41061[41061] +- Add kafka compression support for ZSTD. 
*Heartbeat* diff --git a/libbeat/outputs/kafka/config.go b/libbeat/outputs/kafka/config.go index 19055e0b3175..c7dc74ee9934 100644 --- a/libbeat/outputs/kafka/config.go +++ b/libbeat/outputs/kafka/config.go @@ -100,12 +100,18 @@ var compressionModes = map[string]sarama.CompressionCodec{ // As of sarama 1.24.1, zstd support is broken // (https://github.com/Shopify/sarama/issues/1252), which needs to be // addressed before we add support here. + + // (https://github.com/IBM/sarama/pull/1574) sarama version 1.26.0 has + // fixed this issue and elastic version of sarama has merged this commit. + // (https://github.com/elastic/sarama/commit/37faed7ffc7d59e681d99cfebd1f3d453d6d607c) + "none": sarama.CompressionNone, "no": sarama.CompressionNone, "off": sarama.CompressionNone, "gzip": sarama.CompressionGZIP, "lz4": sarama.CompressionLZ4, "snappy": sarama.CompressionSnappy, + "zstd": sarama.CompressionZSTD, } func defaultConfig() kafkaConfig { diff --git a/libbeat/outputs/kafka/docs/kafka.asciidoc b/libbeat/outputs/kafka/docs/kafka.asciidoc index d1b3d9375598..9907cad61c29 100644 --- a/libbeat/outputs/kafka/docs/kafka.asciidoc +++ b/libbeat/outputs/kafka/docs/kafka.asciidoc @@ -300,7 +300,7 @@ The keep-alive period for an active network connection. If 0s, keep-alives are d ===== `compression` -Sets the output compression codec. Must be one of `none`, `snappy`, `lz4` and `gzip`. The default is `gzip`. +Sets the output compression codec. Must be one of `none`, `snappy`, `lz4`, `gzip` and `zstd`. The default is `gzip`. 
[IMPORTANT] .Known issue with Azure Event Hub for Kafka diff --git a/libbeat/outputs/kafka/kafka_integration_test.go b/libbeat/outputs/kafka/kafka_integration_test.go index 29fc72ac8590..e9abc559774d 100644 --- a/libbeat/outputs/kafka/kafka_integration_test.go +++ b/libbeat/outputs/kafka/kafka_integration_test.go @@ -240,6 +240,18 @@ func TestKafkaPublish(t *testing.T) { "host": "test-host", }), }, + { + "publish message with zstd compression to test topic", + map[string]interface{}{ + "compression": "zstd", + "version": "2.2", + }, + testTopic, + single(mapstr.M{ + "host": "test-host", + "message": id, + }), + }, } defaultConfig := map[string]interface{}{ @@ -254,7 +266,10 @@ func TestKafkaPublish(t *testing.T) { cfg := makeConfig(t, defaultConfig) if test.config != nil { - cfg.Merge(makeConfig(t, test.config)) + err := cfg.Merge(makeConfig(t, test.config)) + if err != nil { + t.Fatal(err) + } } t.Run(name, func(t *testing.T) { @@ -263,7 +278,8 @@ func TestKafkaPublish(t *testing.T) { t.Fatal(err) } - output := grp.Clients[0].(*client) + output, ok := grp.Clients[0].(*client) + assert.True(t, ok, "grp.Clients[0] didn't contain a ptr to client") if err := output.Connect(); err != nil { t.Fatal(err) } @@ -279,7 +295,10 @@ func TestKafkaPublish(t *testing.T) { } wg.Add(1) - output.Publish(context.Background(), batch) + err := output.Publish(context.Background(), batch) + if err != nil { + t.Fatal(err) + } } // wait for all published batches to be ACKed @@ -335,7 +354,8 @@ func validateJSON(t *testing.T, value []byte, events []beat.Event) string { return "" } - msg := decoded["message"].(string) + msg, ok := decoded["message"].(string) + assert.True(t, ok, "type of decoded message was not string") event := findEvent(events, msg) if event == nil { t.Errorf("could not find expected event with message: %v", msg) From d21ed32a08fb4e0aed77cc3200b18bcd7093244c Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 23 Oct 2024 15:21:10 -0400 Subject: [PATCH 72/90] Read 
journal entries from all boots (#41244) Some versions of journalctl will only return messages from the current boot when --follow is passed, it will even ignore the cursor or date arguments. This commit reads messages from all boots by first calling journalctl without the --follow flag, reading all entries and once it successfully exits, then we restart journalctl with the cursor and the --follow flag. The parser test is upsted to use ndjson parser instead of multiline because the multiline parser can have issues when journald input is reading from files. There is a corner case where the journalctl exits successfully and the reader goroutine gets an error, this makes Next to return early, making the multiline to also return early. TestJournaldInput assumed journalctl was run only once, however that has changed recently. The test is updated to accommodate for that and rename to TestJournaldInputRunsAndRecoversFromJournalctlFailures, which better reflects what it is actually testing. --------- Co-authored-by: Pierre HILBERT --- CHANGELOG.next.asciidoc | 1 + filebeat/input/journald/environment_test.go | 4 +- .../input/journald/input_filtering_test.go | 8 +- filebeat/input/journald/input_parsers_test.go | 36 ++-- filebeat/input/journald/input_test.go | 64 ++---- .../journald/pkg/journalctl/jctlmock_test.go | 6 +- .../journald/pkg/journalctl/journalctl.go | 32 ++- .../input/journald/pkg/journalctl/reader.go | 203 ++++++++++++++---- .../journald/pkg/journalctl/reader_test.go | 12 +- .../journald/testdata/multiple-boots.export | 86 ++++++++ .../journald/testdata/multiple-boots.journal | Bin 0 -> 8388608 bytes .../journald/testdata/ndjson-parser.export | Bin 0 -> 830 bytes .../journald/testdata/ndjson-parser.journal | Bin 0 -> 8388608 bytes filebeat/tests/integration/journald_test.go | 9 +- libbeat/tests/integration/framework.go | 62 ++++++ 15 files changed, 392 insertions(+), 131 deletions(-) create mode 100644 filebeat/input/journald/testdata/multiple-boots.export create 
mode 100644 filebeat/input/journald/testdata/multiple-boots.journal create mode 100644 filebeat/input/journald/testdata/ndjson-parser.export create mode 100644 filebeat/input/journald/testdata/ndjson-parser.journal diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 26962b3fd635..e583113b0ab4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -168,6 +168,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Bump github.com/elastic/go-sfdc dependency used by x-pack/filebeat/input/salesforce. {pull}41192[41192] - Log bad handshake details when websocket connection fails {pull}41300[41300] - Improve modification time handling for entities and entity deletion logic in the Active Directory entityanalytics input. {pull}41179[41179] +- Journald input now can read events from all boots {issue}41083[41083] {pull}41244[41244] *Heartbeat* diff --git a/filebeat/input/journald/environment_test.go b/filebeat/input/journald/environment_test.go index 209a2e2dfd8d..57f75163e926 100644 --- a/filebeat/input/journald/environment_test.go +++ b/filebeat/input/journald/environment_test.go @@ -27,7 +27,7 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/beat" @@ -107,7 +107,7 @@ func (e *inputTestingEnvironment) waitUntilEventCount(count int) { e.t.Helper() msg := strings.Builder{} fmt.Fprintf(&msg, "did not find the expected %d events", count) - assert.Eventually(e.t, func() bool { + require.Eventually(e.t, func() bool { sum := len(e.pipeline.GetAllEvents()) if sum == count { return true diff --git a/filebeat/input/journald/input_filtering_test.go b/filebeat/input/journald/input_filtering_test.go index c9ddec9c0467..1aa58d1f8bc3 100644 --- a/filebeat/input/journald/input_filtering_test.go +++ b/filebeat/input/journald/input_filtering_test.go @@ -274,9 +274,11 @@ func 
TestInputSeek(t *testing.T) { env.waitUntilEventCount(len(testCase.expectedMessages)) - for idx, event := range env.pipeline.GetAllEvents() { - if got, expected := event.Fields["message"], testCase.expectedMessages[idx]; got != expected { - t.Fatalf("expecting event message %q, got %q", expected, got) + if !t.Failed() { + for idx, event := range env.pipeline.GetAllEvents() { + if got, expected := event.Fields["message"], testCase.expectedMessages[idx]; got != expected { + t.Fatalf("expecting event message %q, got %q", expected, got) + } } } }) diff --git a/filebeat/input/journald/input_parsers_test.go b/filebeat/input/journald/input_parsers_test.go index 720f53b8ce8b..c1c2c6f6bb5b 100644 --- a/filebeat/input/journald/input_parsers_test.go +++ b/filebeat/input/journald/input_parsers_test.go @@ -31,31 +31,41 @@ import ( // it only tests a single parser, but that is enough to ensure // we're correctly using the parsers func TestInputParsers(t *testing.T) { - inputParsersExpected := []string{"1st line\n2nd line\n3rd line", "4th line\n5th line\n6th line"} env := newInputTestingEnvironment(t) - inp := env.mustCreateInput(mapstr.M{ - "paths": []string{path.Join("testdata", "input-multiline-parser.journal")}, - "include_matches.match": []string{"_SYSTEMD_USER_UNIT=log-service.service"}, + "paths": []string{path.Join("testdata", "ndjson-parser.journal")}, "parsers": []mapstr.M{ { - "multiline": mapstr.M{ - "type": "count", - "count_lines": 3, + "ndjson": mapstr.M{ + "target": "", }, }, }, }) ctx, cancelInput := context.WithCancel(context.Background()) + t.Cleanup(cancelInput) env.startInput(ctx, inp) - env.waitUntilEventCount(len(inputParsersExpected)) + env.waitUntilEventCount(1) + event := env.pipeline.clients[0].GetEvents()[0] + + foo, isString := event.Fields["foo"].(string) + if !isString { + t.Errorf("expecting field 'foo' to be string, got %T", event.Fields["foo"]) + } - for idx, event := range env.pipeline.clients[0].GetEvents() { - if got, expected := 
event.Fields["message"], inputParsersExpected[idx]; got != expected { - t.Errorf("expecting event message %q, got %q", expected, got) - } + answer, isInt := event.Fields["answer"].(int64) + if !isInt { + t.Errorf("expecting field 'answer' to be int64, got %T", event.Fields["answer"]) } - cancelInput() + // The JSON in the test journal is: '{"foo": "bar", "answer":42}' + expectedFoo := "bar" + expectedAnswer := int64(42) + if foo != expectedFoo { + t.Errorf("expecting 'foo' from the Journal JSON to be '%s' got '%s' instead", expectedFoo, foo) + } + if answer != expectedAnswer { + t.Errorf("expecting 'answer' from the Journal JSON to be '%d' got '%d' instead", expectedAnswer, answer) + } } diff --git a/filebeat/input/journald/input_test.go b/filebeat/input/journald/input_test.go index 09dd8d1a485f..b82663c52626 100644 --- a/filebeat/input/journald/input_test.go +++ b/filebeat/input/journald/input_test.go @@ -39,59 +39,19 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" ) -// How to write to journal from CLI: -// https://www.baeldung.com/linux/systemd-journal-message-terminal +func TestInputCanReadAllBoots(t *testing.T) { + env := newInputTestingEnvironment(t) + cfg := mapstr.M{ + "paths": []string{path.Join("testdata", "multiple-boots.journal")}, + } + inp := env.mustCreateInput(cfg) -// TestGenerateJournalEntries generates entries in the user's journal. -// It is kept commented out at the top of the file as reference and -// easy access. -// -// How to generate a journal file with only the entries you want: -// 1. Add the dependencies for this test -// go get github.com/ssgreg/journald -// 2. Uncomment and run the test: -// 3. Add the following import: -// journaldlogger "github.com/ssgreg/journald" -// 4. Get a VM, ssh into it, make sure you can access the test from it -// 5. Find the journal file, usually at /var/log/journal//user-1000.journal -// 7. Clean and rotate the journal -// sudo journalctl --vacuum-time=1s -// sudo journalctl --rotate -// 8. 
Run this test: `go test -run=TestGenerateJournalEntries` -// 9. Copy the journal file somewhere else -// cp /var/log/journal/21282bcb80a74c08a0d14a047372256c/user-1000.journal /tmp/foo.journal -// 10. Read the journal file: -// journalctl --file=/tmp/foo.journal -n 10 -// 11. Read the journal with all fields as JSON -// journalctl --file=/tmp/foo.journal -n 10 -o json -// func TestGenerateJournalEntries(t *testing.T) { -// fields := []map[string]any{ -// { -// "BAR": "bar", -// }, -// { -// "FOO": "foo", -// }, -// { -// "BAR": "bar", -// "FOO": "foo", -// }, -// { -// "FOO_BAR": "foo", -// }, -// { -// "FOO_BAR": "bar", -// }, -// { -// "FOO_BAR": "foo bar", -// }, -// } -// for i, m := range fields { -// if err := journaldlogger.Send(fmt.Sprintf("message %d", i), journaldlogger.PriorityInfo, m); err != nil { -// t.Fatal(err) -// } -// } -// } + ctx, cancelInput := context.WithCancel(context.Background()) + t.Cleanup(cancelInput) + + env.startInput(ctx, inp) + env.waitUntilEventCount(6) +} func TestInputFieldsTranslation(t *testing.T) { // A few random keys to verify diff --git a/filebeat/input/journald/pkg/journalctl/jctlmock_test.go b/filebeat/input/journald/pkg/journalctl/jctlmock_test.go index c9244a5fa43a..4f113d36f104 100644 --- a/filebeat/input/journald/pkg/journalctl/jctlmock_test.go +++ b/filebeat/input/journald/pkg/journalctl/jctlmock_test.go @@ -39,7 +39,7 @@ var _ Jctl = &JctlMock{} // KillFunc: func() error { // panic("mock out the Kill method") // }, -// NextFunc: func(canceler input.Canceler) ([]byte, error) { +// NextFunc: func(canceler input.Canceler) ([]byte, bool, error) { // panic("mock out the Next method") // }, // } @@ -53,7 +53,7 @@ type JctlMock struct { KillFunc func() error // NextFunc mocks the Next method. - NextFunc func(canceler input.Canceler) ([]byte, error) + NextFunc func(canceler input.Canceler) ([]byte, bool, error) // calls tracks calls to the methods. 
calls struct { @@ -98,7 +98,7 @@ func (mock *JctlMock) KillCalls() []struct { } // Next calls NextFunc. -func (mock *JctlMock) Next(canceler input.Canceler) ([]byte, error) { +func (mock *JctlMock) Next(canceler input.Canceler) ([]byte, bool, error) { if mock.NextFunc == nil { panic("JctlMock.NextFunc: method is nil but Jctl.Next was just called") } diff --git a/filebeat/input/journald/pkg/journalctl/journalctl.go b/filebeat/input/journald/pkg/journalctl/journalctl.go index 54bcb208b829..c0c213329653 100644 --- a/filebeat/input/journald/pkg/journalctl/journalctl.go +++ b/filebeat/input/journald/pkg/journalctl/journalctl.go @@ -24,6 +24,7 @@ import ( "io" "os/exec" "strings" + "sync" input "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/elastic-agent-libs/logp" @@ -37,6 +38,7 @@ type journalctl struct { logger *logp.Logger canceler input.Canceler + waitDone sync.WaitGroup } // Factory returns an instance of journalctl ready to use. @@ -95,7 +97,7 @@ func Factory(canceller input.Canceler, logger *logp.Logger, binary string, args data, err := reader.ReadBytes('\n') if err != nil { if !errors.Is(err, io.EOF) { - logger.Errorf("cannot read from journalctl stdout: %s", err) + logger.Errorf("cannot read from journalctl stdout: '%s'", err) } return } @@ -118,10 +120,13 @@ func Factory(canceller input.Canceler, logger *logp.Logger, binary string, args // Whenever the journalctl process exits, the `Wait` call returns, // if there was an error it is logged and this goroutine exits. + jctl.waitDone.Add(1) go func() { + defer jctl.waitDone.Done() if err := cmd.Wait(); err != nil { jctl.logger.Errorf("journalctl exited with an error, exit code %d ", cmd.ProcessState.ExitCode()) } + jctl.logger.Debugf("journalctl exit code: %d", cmd.ProcessState.ExitCode()) }() return &jctl, nil @@ -130,18 +135,31 @@ func Factory(canceller input.Canceler, logger *logp.Logger, binary string, args // Kill Terminates the journalctl process using a SIGKILL. 
func (j *journalctl) Kill() error { j.logger.Debug("sending SIGKILL to journalctl") - err := j.cmd.Process.Kill() - return err + return j.cmd.Process.Kill() } -func (j *journalctl) Next(cancel input.Canceler) ([]byte, error) { +// Next returns the next journal entry (as JSON). If `finished` is true, then +// journalctl finished returning all data and exited successfully, if journalctl +// exited unexpectedly, then `err` is non-nil, `finished` is false and an empty +// byte array is returned. +func (j *journalctl) Next(cancel input.Canceler) ([]byte, bool, error) { select { case <-cancel.Done(): - return []byte{}, ErrCancelled + return []byte{}, false, ErrCancelled case d, open := <-j.dataChan: if !open { - return []byte{}, errors.New("no more data to read, journalctl might have exited unexpectedly") + // Wait for the process to exit, so we can read the exit code. + j.waitDone.Wait() + if j.cmd.ProcessState.ExitCode() == 0 { + return []byte{}, true, nil + } + return []byte{}, + false, + fmt.Errorf( + "no more data to read, journalctl exited unexpectedly, exit code: %d", + j.cmd.ProcessState.ExitCode()) } - return d, nil + + return d, false, nil } } diff --git a/filebeat/input/journald/pkg/journalctl/reader.go b/filebeat/input/journald/pkg/journalctl/reader.go index b530e942b237..5e8ef54f5434 100644 --- a/filebeat/input/journald/pkg/journalctl/reader.go +++ b/filebeat/input/journald/pkg/journalctl/reader.go @@ -58,10 +58,25 @@ type JctlFactory func(canceller input.Canceler, logger *logp.Logger, binary stri // //go:generate moq --fmt gofmt -out jctlmock_test.go . Jctl type Jctl interface { - Next(input.Canceler) ([]byte, error) + // Next returns the next journal entry. If there is no entry available + // next will block until there is an entry or cancel is cancelled. + // + // If cancel is cancelled, Next returns a zero value JournalEntry + // and ErrCancelled. 
+ // + // If finished is true, then journalctl returned all messages + // and exited successfully + Next(input.Canceler) (data []byte, finished bool, err error) Kill() error } +type readerState uint8 + +const ( + readingOldEntriesState readerState = iota + followingState +) + // Reader reads entries from journald by calling `jouranlctl` // and reading its output. // @@ -74,36 +89,55 @@ type Jctl interface { // More details can be found in the PR introducing this feature and related // issues. PR: https://github.com/elastic/beats/pull/40061. type Reader struct { - args []string + // logger is the logger for the reader + logger *logp.Logger + + // jctlLogger is the logger for the code controlling + // the journalctl process + jctlLogger *logp.Logger + + // args are arguments for journalctl that never change, + // like the message filters, format, etc + args []string + + // firstRunArgs are the arguments used in the first call to + // journalctl that will be replaced by the cursor argument + // once data has been ingested + firstRunArgs []string + + // cursor is the jornalctl cursor, it is also stored in Filebeat's registry cursor string - logger *logp.Logger canceler input.Canceler jctl Jctl jctlFactory JctlFactory - backoff backoff.Backoff + backoff backoff.Backoff + seekMode SeekMode + state readerState } // handleSeekAndCursor returns the correct arguments for seek and cursor. // If there is a cursor, only the cursor is used, seek is ignored. 
// If there is no cursor, then seek is used -func handleSeekAndCursor(mode SeekMode, since time.Duration, cursor string) []string { +// The bool parameter indicates whether there might be messages from +// the previous boots +func handleSeekAndCursor(mode SeekMode, since time.Duration, cursor string) ([]string, bool) { if cursor != "" { - return []string{"--after-cursor", cursor} + return []string{"--after-cursor", cursor}, true } switch mode { case SeekSince: - return []string{"--since", time.Now().Add(since).Format(sinceTimeFormat)} + return []string{"--since", time.Now().Add(since).Format(sinceTimeFormat)}, true case SeekTail: - return []string{"--since", "now"} + return []string{"--since", "now"}, false case SeekHead: - return []string{"--no-tail"} + return []string{"--no-tail"}, true default: // That should never happen - return []string{} + return []string{}, false } } @@ -146,7 +180,9 @@ func New( ) (*Reader, error) { logger = logger.Named("reader") - args := []string{"--utc", "--output=json", "--follow"} + + args := []string{"--utc", "--output=json", "--no-pager"} + if file != "" && file != localSystemJournalID { args = append(args, "--file", file) } @@ -171,26 +207,43 @@ func New( args = append(args, "--facility", fmt.Sprintf("%d", facility)) } - otherArgs := handleSeekAndCursor(mode, since, cursor) - - jctl, err := newJctl(canceler, logger.Named("journalctl-runner"), "journalctl", append(args, otherArgs...)...) 
- if err != nil { - return &Reader{}, err + firstRunArgs, prevBoots := handleSeekAndCursor(mode, since, cursor) + state := readingOldEntriesState // Initial state + if !prevBoots { + state = followingState } r := Reader{ - args: args, - cursor: cursor, - jctl: jctl, - logger: logger, + logger: logger, + jctlLogger: logger.Named("journalctl-runner"), + + args: args, + firstRunArgs: firstRunArgs, + + state: state, + cursor: cursor, + canceler: canceler, jctlFactory: newJctl, backoff: backoff.NewExpBackoff(canceler.Done(), 100*time.Millisecond, 2*time.Second), } + if err := r.newJctl(firstRunArgs...); err != nil { + return &Reader{}, err + } + return &r, nil } +func (r *Reader) newJctl(extraArgs ...string) error { + args := append(r.args, extraArgs...) + + jctl, err := r.jctlFactory(r.canceler, r.jctlLogger, "journalctl", args...) + r.jctl = jctl + + return err +} + // Close stops the `journalctl` process and waits for all // goroutines to return, the canceller passed to `New` should // be cancelled before `Close` is called @@ -210,25 +263,49 @@ func (r *Reader) Close() error { // If cancel is cancelled, Next returns a zero value JournalEntry // and ErrCancelled. func (r *Reader) Next(cancel input.Canceler) (JournalEntry, error) { - d, err := r.jctl.Next(cancel) + msg, finished, err := r.jctl.Next(cancel) // Check if the input has been cancelled select { case <-cancel.Done(): - // Input has been cancelled, ignore the message? - return JournalEntry{}, err + // The caller is responsible for calling Reader.Close to terminate + // journalctl. Cancelling this canceller only means this Next call was + // cancelled. Because the input has been cancelled, we ignore the message + // and any error it might have returned. 
+ return JournalEntry{}, ErrCancelled default: - // Two options: - // - No error, go parse the message - // - Error, journalctl is not running any more, restart it + // Three options: + // - Journalctl finished reading messages from previous boots + // successfully, restart it with --follow flag. + // - Error, journalctl exited with an error, restart it in the same + // mode it was running. + // - No error, skip the default block and go parse the message + + var extraArgs []string + var restart bool + + // First of all: handle the error, if any if err != nil { r.logger.Warnf("reader error: '%s', restarting...", err) - // Copy r.args and if needed, add the cursor flag - args := append([]string{}, r.args...) - if r.cursor != "" { - args = append(args, "--after-cursor", r.cursor) + restart = true + + if r.cursor == "" && r.state == readingOldEntriesState { + // Corner case: journalctl exited with an error before reading the + // 1st message. This means we don't have a cursor and need to restart + // it with the initial arguments. + extraArgs = append(extraArgs, r.firstRunArgs...) + } else if r.cursor != "" { + // There is a cursor, so just append it to our arguments + extraArgs = append(extraArgs, "--after-cursor", r.cursor) + + // Last, but not least, add "--follow" if we're in following mode + if r.state == followingState { + extraArgs = append(extraArgs, "--follow") + } } + // Handle backoff + // // If the last restart (if any) was more than 5s ago, // recreate the backoff and do not wait. 
// We recreate the backoff so r.backoff.Last().IsZero() @@ -239,49 +316,91 @@ func (r *Reader) Next(cancel input.Canceler) (JournalEntry, error) { } else { r.backoff.Wait() } + } + + // If journalctl finished reading the messages from previous boots + // and exited successfully + if finished { + restart = true + extraArgs = append(extraArgs, "--follow") + if r.cursor != "" { + // If there is a cursor, only use the cursor and the follow argument + extraArgs = append(extraArgs, "--after-cursor", r.cursor) + } else { + // If there is no cursor, it means the first successfully run + // did not return any event, so we have to restart with the + // --follow and all the initial args. + + extraArgs = append(extraArgs, r.firstRunArgs...) + } + + r.state = followingState + r.logger.Info("finished reading journal entries from all boots, restarting journalctl with follow flag") + } - jctl, err := r.jctlFactory(r.canceler, r.logger.Named("journalctl-runner"), "journalctl", args...) - if err != nil { + // Restart journalctl if needed + if restart { + if err := r.newJctl(extraArgs...); err != nil { // If we cannot restart journalct, there is nothing we can do. 
return JournalEntry{}, fmt.Errorf("cannot restart journalctl: %w", err) } - r.jctl = jctl // Return an empty message and wait for the input to call us again return JournalEntry{}, ErrRestarting } } + return r.handleMessage(msg) +} + +func (r *Reader) handleMessage(msg []byte) (JournalEntry, error) { fields := map[string]any{} - if err := json.Unmarshal(d, &fields); err != nil { - r.logger.Error("journal event cannot be parsed as map[string]any, look at the events log file for the raw journal event") + if err := json.Unmarshal(msg, &fields); err != nil { + r.logger.Error("journal event cannot be parsed as map[string]any, " + + "look at the events log file for the raw journal event") + // Log raw data to events log file - msg := fmt.Sprintf("data cannot be parsed as map[string]any JSON: '%s'", string(d)) - r.logger.Errorw(msg, logp.TypeKey, logp.EventType, "error.message", err.Error()) + msg := fmt.Sprintf("data cannot be parsed as map[string]any. Data: '%s'", + string(msg)) + r.logger.Errorw( + msg, + "error.message", err.Error(), + logp.TypeKey, logp.EventType) + return JournalEntry{}, fmt.Errorf("cannot decode Journald JSON: %w", err) } ts, isString := fields["__REALTIME_TIMESTAMP"].(string) if !isString { - return JournalEntry{}, fmt.Errorf("'__REALTIME_TIMESTAMP': '%[1]v', type %[1]T is not a string", fields["__REALTIME_TIMESTAMP"]) + return JournalEntry{}, + fmt.Errorf("'__REALTIME_TIMESTAMP': '%[1]v', type %[1]T is not a string", + fields["__REALTIME_TIMESTAMP"]) } unixTS, err := strconv.ParseUint(ts, 10, 64) if err != nil { - return JournalEntry{}, fmt.Errorf("could not convert '__REALTIME_TIMESTAMP' to uint64: %w", err) + return JournalEntry{}, + fmt.Errorf("could not convert '__REALTIME_TIMESTAMP' to uint64: %w", + err) } monotomicTs, isString := fields["__MONOTONIC_TIMESTAMP"].(string) if !isString { - return JournalEntry{}, fmt.Errorf("'__MONOTONIC_TIMESTAMP': '%[1]v', type %[1]T is not a string", fields["__MONOTONIC_TIMESTAMP"]) + return JournalEntry{}, 
+ fmt.Errorf("'__MONOTONIC_TIMESTAMP': '%[1]v', type %[1]T is not a string", + fields["__MONOTONIC_TIMESTAMP"]) } monotonicTSInt, err := strconv.ParseUint(monotomicTs, 10, 64) if err != nil { - return JournalEntry{}, fmt.Errorf("could not convert '__MONOTONIC_TIMESTAMP' to uint64: %w", err) + return JournalEntry{}, + fmt.Errorf("could not convert '__MONOTONIC_TIMESTAMP' to uint64: %w", + err) } cursor, isString := fields["__CURSOR"].(string) if !isString { - return JournalEntry{}, fmt.Errorf("'_CURSOR': '%[1]v', type %[1]T is not a string", fields["_CURSOR"]) + return JournalEntry{}, + fmt.Errorf("'_CURSOR': '%[1]v', type %[1]T is not a string", + fields["_CURSOR"]) } // Update our cursor so we can restart journalctl if needed diff --git a/filebeat/input/journald/pkg/journalctl/reader_test.go b/filebeat/input/journald/pkg/journalctl/reader_test.go index af3837fd09c1..f1c5f3bf4bca 100644 --- a/filebeat/input/journald/pkg/journalctl/reader_test.go +++ b/filebeat/input/journald/pkg/journalctl/reader_test.go @@ -48,8 +48,8 @@ func TestEventWithNonStringData(t *testing.T) { for idx, rawEvent := range testCases { t.Run(fmt.Sprintf("test %d", idx), func(t *testing.T) { mock := JctlMock{ - NextFunc: func(canceler input.Canceler) ([]byte, error) { - return rawEvent, nil + NextFunc: func(canceler input.Canceler) ([]byte, bool, error) { + return rawEvent, false, nil }, } r := Reader{ @@ -72,8 +72,8 @@ func TestRestartsJournalctlOnError(t *testing.T) { ctx := context.Background() mock := JctlMock{ - NextFunc: func(canceler input.Canceler) ([]byte, error) { - return jdEvent, errors.New("journalctl exited with code 42") + NextFunc: func(canceler input.Canceler) ([]byte, bool, error) { + return jdEvent, false, errors.New("journalctl exited with code 42") }, } @@ -90,8 +90,8 @@ func TestRestartsJournalctlOnError(t *testing.T) { // If calls have been made, change the Next function to always succeed // and return it - mock.NextFunc = func(canceler input.Canceler) ([]byte, error) { - 
return jdEvent, nil + mock.NextFunc = func(canceler input.Canceler) ([]byte, bool, error) { + return jdEvent, false, nil } return &mock, nil diff --git a/filebeat/input/journald/testdata/multiple-boots.export b/filebeat/input/journald/testdata/multiple-boots.export new file mode 100644 index 000000000000..91e5488470b4 --- /dev/null +++ b/filebeat/input/journald/testdata/multiple-boots.export @@ -0,0 +1,86 @@ +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=1;b=0ffe5f74a4bd49ca8597eb05fe1a512a;m=39f445;t=6225212a5b6da;x=3f056d2626450d83 +__REALTIME_TIMESTAMP=1726585755776730 +__MONOTONIC_TIMESTAMP=3798085 +_BOOT_ID=0ffe5f74a4bd49ca8597eb05fe1a512a +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +PRIORITY=5 +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +MESSAGE=Linux version 6.1.0-25-amd64 (debian-kernel@lists.debian.org) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26) +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system + +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=2;b=0ffe5f74a4bd49ca8597eb05fe1a512a;m=39f452;t=6225212a5b6e7;x=67b36f81fa43ba68 +__REALTIME_TIMESTAMP=1726585755776743 +__MONOTONIC_TIMESTAMP=3798098 +_BOOT_ID=0ffe5f74a4bd49ca8597eb05fe1a512a +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system +PRIORITY=6 +MESSAGE=Command line: BOOT_IMAGE=/boot/vmlinuz-6.1.0-25-amd64 root=UUID=3841998b-4e88-4231-93c8-3fc24b549223 ro quiet + +Sep 17 11:26:36 Debian12 kernel: Linux version 6.1.0-25-amd64 (debian-kernel@lists.debian.org) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26) +Sep 17 11:26:36 Debian12 kernel: Command line: BOOT_IMAGE=/boot/vmlinuz-6.1.0-25-amd64 root=UUID=3841998b-4e88-4231-93c8-3fc24b549223 ro quiet 
+__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=22e3;b=457105b2d84547a4b4549f0eaa700b61;m=35bc29;t=6227ecec5b11f;x=a46eaad8c3930985 +__REALTIME_TIMESTAMP=1726777890550047 +__MONOTONIC_TIMESTAMP=3521577 +_BOOT_ID=457105b2d84547a4b4549f0eaa700b61 +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +PRIORITY=5 +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +MESSAGE=Linux version 6.1.0-25-amd64 (debian-kernel@lists.debian.org) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26) +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system + +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=22e4;b=457105b2d84547a4b4549f0eaa700b61;m=35bc37;t=6227ecec5b12d;x=fcd8a87f1f95be6e +__REALTIME_TIMESTAMP=1726777890550061 +__MONOTONIC_TIMESTAMP=3521591 +_BOOT_ID=457105b2d84547a4b4549f0eaa700b61 +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system +PRIORITY=6 +MESSAGE=Command line: BOOT_IMAGE=/boot/vmlinuz-6.1.0-25-amd64 root=UUID=3841998b-4e88-4231-93c8-3fc24b549223 ro quiet + +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=451d;b=e2fca45429e54522bb2927112eb8e0b5;m=2aad67;t=6228fba6fbe98;x=ab82fca7956545cf +__REALTIME_TIMESTAMP=1726850563817112 +__MONOTONIC_TIMESTAMP=2796903 +_BOOT_ID=e2fca45429e54522bb2927112eb8e0b5 +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +PRIORITY=5 +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +MESSAGE=Linux version 6.1.0-25-amd64 (debian-kernel@lists.debian.org) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26) +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system + +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=451e;b=e2fca45429e54522bb2927112eb8e0b5;m=2aad75;t=6228fba6fbea7;x=f334fe004963f224 
+__REALTIME_TIMESTAMP=1726850563817127 +__MONOTONIC_TIMESTAMP=2796917 +_BOOT_ID=e2fca45429e54522bb2927112eb8e0b5 +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system +PRIORITY=6 +MESSAGE=Command line: BOOT_IMAGE=/boot/vmlinuz-6.1.0-25-amd64 root=UUID=3841998b-4e88-4231-93c8-3fc24b549223 ro quiet + diff --git a/filebeat/input/journald/testdata/multiple-boots.journal b/filebeat/input/journald/testdata/multiple-boots.journal new file mode 100644 index 0000000000000000000000000000000000000000..668b82162d6a33e396c910fdfb9f6621ca4fd36e GIT binary patch literal 8388608 zcmeF(YmA&%c>v%sz9b& zv3DI`k`$ySK$8Z6>QDqRv?)|FN|h=k5FD96)2b9fsQ_11t0+IhmMSeu70GQWO6;xE z+4;^J#3Zh7!SdztkG%8Ech390=Q)>e>>pV@{QlAY{*nG%^Up=||4Dz=KeYGlJFe=# z`~HtT_>n^o|H_Bnde6cQKbw00EuXmKn_v6S5B||V{&;TBSm$%KE56aW?9%1?X7{$Y z&n-xaUcRgK;lNw6>%0H8()zG8-QWMK+4n!WsM`9_lU?SPw8cobKbN(8=7}%8dE|;E zxrd&bd2&}X?tJ8`R*y?kzc;2B;jQtZEiO*iv#E}@+h_gd(nD!JQatMie3Svc_`jJ) z#G^EK`#1&ibD3|%_U@0R`w`pkpYGSY4`<_xc)az_zLfQkc)a~z@AkXXxe)R7&MzN< z(^;V70eEK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UI97rEhbyh75s$T>b_56zAV7cs0RjXF 
z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009DLhrqf8)z&~Fo*kny3IYTO5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfWYe~(0{1XN*3|;%PbQC0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C7&Mbj_2ebcQYs53FdessjK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs 
z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7&OL$k zFIQTFj(F|~7%>3?1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAaJq*FP>Lz zrHFX4`Sd41fB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5ID01 z&R*GRBb^-q(XoI0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 
z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C7P6vVS9;&p;h62oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+0D;p-U_mNnD#gnNu3Y`ucR%wl z?G+w)xzhUTz#FQq_=ePA{Jld%W8(wY^%f>48{LyV<$8HyW4XGaUg@s(G$sn&lZ|4% zyI88H_R+79uK#D*C%?Y#^5;LAE^{xQm*tPVQQs}8!DV0g{eN4%=d!1>l_QfFsG}z(IJ|S|pL}lFC-RZ+yN9y*YU`hide)_egL^LCwe*oIUxK(Y=q3%tXF*bH7l}&-o2TYl9$j@qt#|d*^v*l;w>P$JpPHG@uUuYSUg#=ych$FSTv^U{ zY-~(S)u+3%mGz#@Q`>iJU!Hxpd}iCG&V0wF4I8?OrF_SAt%mtxX?dxcpjhrqAAci% z!>CBgwmkXWwcNX)bgTwjZky>qVcx?Q-o7Suv z9Oz4pT6Gl*E4x-K9G-1F?`qhG|R=g#(eM%Ferz+i2Bv~Os**1P?8x9@0dX?BWwR;C1RyJP>C5AGU# zH1e%a?~n0c=%35Qc{7_DMt#pb@ufG9T(Klq`)M|BZ#q1_W6xg>eD9tI{$ugsx8Hcx z$1bkD>s?zGU%U0C%st*)abCxJBF@J+kNea45a(f>7jZttevR|tK-#~tFJqo#y@hkk zYm75oW4&cn=5qZPWaA0nrRjZ|Uz~lvHSO$1L*3B znVB6|-@c`JW9Nsu+76&?&3nCT*EaXsib}aytyU(w%8g2;t6W-9?5eKVQ0ZDR+1zmx z-Q{Yjv?9N4CV$(`smA=vCB`$G3XOI4?`eIE9ASOj`efh1kIX#2>3G-2(bmK9&VRfY zWB%eC3;*`@5%UrA5ZCRoz2Dt$%+^ozYhO>do}b-s@Avcx_A6X?V$Zwp1pCdNV86;m zCsybE$Jj6SRs4Sb(c?Wg{`64p=9sToe=#3_o%U6%PD zbTd>Z3ypfcr%;$!8RvZaeu(SKzw+%L{9g0C?EGf-gROSk;;%kkyJ64Q#=daL!&@(` z-*dsIzxDLP(|b<0^YO9tehIhs^%;)g7wb5_k9eZ}!f!SuT9jf}^6WW+=k!ybx%{p_ ze|qj{o~QaeIabej&wMexkHRtHkvvbfU*vC(?fdA)1)sa~!$0`hZ$`TqSIqxU(*Ata zK5CREH`JSZr&Mipm%B@)iHTCR)Ke^$8WWX9VIuu?=2h>N#pyx2pPG4cS92e2dL(<* z{&;TBSm$%KE56aW?9%1?X7~QE*7(drb07VSqs@D~pHJ2AjQD$VxW)Xf3&0e^?@06B z*8EkM?fWI3^H6fzc?7pB|7*iQ?r{0PkLGr&_sg-mov2p;hEw}|$eRhBs}Aw^f>kxTF0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5;&|6_*^1ONa40D$~o8zI4g0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!Ka6mhBAOHXW03gWUdXNht0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r 
z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK ffB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFz^}(96ZF* literal 0 HcmV?d00001 diff --git a/filebeat/input/journald/testdata/ndjson-parser.export b/filebeat/input/journald/testdata/ndjson-parser.export new file mode 100644 index 0000000000000000000000000000000000000000..0a24b593f776c6d9855902cdb759b7e578756830 GIT binary patch literal 830 zcmah{U2oeU6x}oX6IGtqmQUL>;=yxhYAH6B!Cm`=z{XOfHJR#ETQ%*!4>wDdhfU)F zAvovWb1ui)qHDEk#UvPlI2oyTJ#{p&;0S|5MiS~%ObBz!tze7}OD_T*dq)|?JTS^I z_8xPbB7}p_0`zS8Bv22Nl#Y&M%Y#TT?Gu!c2n>ArLm*7>FraY=-noQhU~Mb&jVUW> z=S`cuY6MDS)r>$!GlUUIBdSuhGO8|%a~Osswn8LB39z4)GPYd(S0ZTIQnjUd6p_?L z0)h|%yM8|vd1K}6tt?D=FGYOPa#PmbgDq5T$HXVDO2F?fX zGs=+nG|(8T)Imr&&f~^gTQ;ZYcf*Y%_mn3~?Mm Lp{h#s?f}4VYs%{M literal 0 HcmV?d00001 diff --git a/filebeat/input/journald/testdata/ndjson-parser.journal b/filebeat/input/journald/testdata/ndjson-parser.journal new file mode 100644 index 0000000000000000000000000000000000000000..aa4aa7960f3aab76aee57378dbd9ea04f5f6a2d0 GIT binary patch literal 8388608 zcmeF(e{9_4c>r)HPE59T;4s$E(I2VHhSHH>U+nmf&TCwA;Dh+b>@%T^&e)Dq0wsyC zLs)@@G9gsjZXUF>>b5GKx*w>OnpI7Q7N&KZ)=5?Af?Cu}YpKOX3sY36TCos9GW+g* z?Aqw!S_vU>KKX;^?!NEyywCf&?~nH)U_^ylc!&5!+M_L`r5 
z`QP9C@!~UgePqRb)k80Cx#s;J-TBy8esar$-+ubY=ZmYJ$=B^$b;BS4bR(^lae`iZCYSZ!k7o^XZQ?T`-n@^i}BP)eEwR+@x$fyM?BGaZcOzeo@l+3oj?9^T5pQ;t$$<6 zi#XqW6J^$G@-?CTA-*?0lUH|UtbU#V)MC;9!zdPNZBF;5_zQ?mi%RUk3 zn?HMe{i3o@#M#GR@Au;`mDjsJt^4)v=WL&UWqK}0Jlj32{|pmI`{k;LXIT3Sz1^>+ z=TXGj>%C)f`FRy__Hpk|OyJXL{}u7Xs>xpH0?X3AI^sfK?`yrjC24}O^!Ki>^?d3R zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&Un2*4L>Rf4f#QEeJOMn0Y0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5Fl{s1-2i}mL`pO 
z>N_wa0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfWYf1u>GZMY5It-XD{X^K!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oN~=0yiznm5N3@`7@|U zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5Fl_S3+!K-E43f-OkRuL2@oJafB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5Fl_ofkTUOrNa@AH`Xx%1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U 
zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7e?FCwt#NVYUA;xCe83;_ZJ2oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&we1Y0y z<^TRH;=*6C6%rsofB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly 
zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyaIys2 zt8%4M5l>cKbqEk3K!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF 
z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0tC)XfeVjh zON~c7GgqN&0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXTqCoB8Y-w2$7vdVNi2wlt1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5IDZTp2fM+ zfQZK*?I-~P1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1WsRp16Ae!pGw5jw^N-FAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkL{>=GDTk}EAZ;@P!&t0h2y009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U 
zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!CvME3p2RY^k}3 zr*Ee^BS3%v0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t6PSz}U;#($XR>)OA`H0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkL{R0)h7PXB&pil=H`^AI3FfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyFt$(2$f&Ntt9 z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N 
z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAaFVe)V`c8wGr`j=t&O*2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zKw!QC>#K96l!)`qH=Y0i0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAVA>c3Osr& zTPhgwwj-o zSFyivMaytz^Uy$JQ)4DK+|<~V$qWu=a+&6ahRpC_cDR19DP=ECu|CDe7w>(;PwvT& z?mwC>eX{+fa*Vn$&(&%CU+li?sg)PBe=ohA3YGfn?kRNj6pHU_Syvu9d;S;uE_nW; z_ILeP6skR3oPYD0KE+*WcFFSRxyxxy-5)KE*Wf4F}h>9=Pa%w?|&YihZ^AT~R3ZZ|J&e zqPw5_s-mCfRN%W09ee7Wuk>z?dV8XunfX)S*TyvPo7G3Zb>G7u*%5gWEB4d-zTRTK z9aTbxjYs4z^;pG4;HSxGwQ8>C46S*mq&!t@27;?<$rVW(rXlj*;QpR!?45Yeqh}WoUT%`-}5x-;?dt%MM=haMV38yT4MM>GSIM(!ico z&;4uN_1|cSyohu5)z>@0Rc>x|G&PYNZtqTo-m~Hz>PdZsbR-*k4C$TYI51KY5ED*p$r^)3~{Q zV7Pv5rhZ_uR`k0z6>k3A{WqQSy*+DUp4wySd!DXaIuw2Oq=Db)zWUSOIkfYukr%OI zpY2_}#m?4_e9INXgIfl+He|{f(NBCKzx9LXA3N{C&X}(1(dZ}g(?l_#FTMZb!SlZM zo_oKzH~NTJv7es4&SIe>-{0HT)tzq{`Gb+s;aew?qMta=&VO;my)7HZ-x~GWtIE&G z{u%kx=UF^hf8%54|JlZso2Q;TkzcW&Voz&lZ+BNuv1Md*)3zO>DIxkvTvgjU-tpu2 zZ+K&C)H@LUOy!r4girLd{JNSWKmFNv_C;RAiv5)DJH7ed-a=PrOEx^BpLoA~*NQ{; zU3~e}^YW%e$!A9Xw9j2>;!iyE;oE>nnJ@k03y;UVhobM9^Gw}ud(*&W zN3Py``|iP2kr#2!zGq2>V?2)z{pu$>ceR{*XVlxXI5|c?DL=l~xX)gC|KINIf7kgR zj=YF-_S4r{m^js5B^7vn;96~w02W=?LcNIJ2c#o%jBl6 
z*WXDKjK23{-~ZpXCmx9T4$SU%M*Zo&Z%GNy?r8j#?4mEMkNk+0`!4q6TRSFuZQeYT zYaY(!HVqDD8=D#i2OEbQ>zf*z2kPq^bB($36%)R3JwN>9XSe@s``ez3dB&E6Z`3Ux z3Eu$Ct?TRA`S=&!8F>*a_1Q6TA-7(YZ@IH>^R{hum#?fF9JsCS(v@`sTSs;d-&S{d zW2U@Ci~cT41q)klT>hiCjqZ;6*`?+FQoX6~J@)TEJXkpQmUHg^zZf5}QeXYulbfP- z4eQbkCFYO)^S@krcilCA^uXUu)k*VD=1qPUF0p@q@zz$CO)iA2lk{!qMzsXW#07li)*T~hevDP zGBM`qBcCs>dM01DZ`CEs*E~AD_rbsZho|2D{ml9CdnkUF#qX2&9TdM8;&(~x-{W^e z?5AVj9{bqXKgT{Y_N}pxi+yA4n_|Bf`<2)y#eN|6CGk9t=YKp8(s^B|r- zai5L*f86Kdz8d$JxUa?a8~2mAKH@r!^D(Z6{ptLO^Dw@zI4^3__Z9cu^!8QfM|>_4 zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!CvivqJ|000000K>n|dkl?_90|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! 
z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? z4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj! z0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^` zz<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!K zaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB) z95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c z2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*= zfddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede z;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQq zIB?*=fddB)95`^`z<~n?4jede;J|?c2M!!KaNxj!0|yQqIB?*=fddB)95`^`z<~n? 
z4jede;J|?c2M!!KpdC6E00000802p~j6H>r0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 hV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwQ4g_I%88rX^ literal 0 HcmV?d00001 diff --git 
a/filebeat/tests/integration/journald_test.go b/filebeat/tests/integration/journald_test.go index 447e49b82bb7..712d2db4871a 100644 --- a/filebeat/tests/integration/journald_test.go +++ b/filebeat/tests/integration/journald_test.go @@ -75,7 +75,7 @@ func generateJournaldLogs(t *testing.T, ctx context.Context, syslogID string, ma //go:embed testdata/filebeat_journald.yml var journaldInputCfg string -func TestJournaldInput(t *testing.T) { +func TestJournaldInputRunsAndRecoversFromJournalctlFailures(t *testing.T) { filebeat := integration.NewBeat( t, "filebeat", @@ -90,9 +90,12 @@ func TestJournaldInput(t *testing.T) { filebeat.WriteConfigFile(yamlCfg) filebeat.Start() + // On a normal execution we run journalclt twice, the first time to read all messages from the + // previous boot until 'now' and the second one with the --follow flag that should keep on running. + filebeat.WaitForLogs("journalctl started with PID", 10*time.Second, "journalctl did not start") filebeat.WaitForLogs("journalctl started with PID", 10*time.Second, "journalctl did not start") - pidLine := filebeat.GetLogLine("journalctl started with PID") + pidLine := filebeat.GetLastLogLine("journalctl started with PID") logEntry := struct{ Message string }{} if err := json.Unmarshal([]byte(pidLine), &logEntry); err != nil { t.Errorf("could not parse PID log entry as JSON: %s", err) @@ -105,7 +108,7 @@ func TestJournaldInput(t *testing.T) { // Kill journalctl if err := syscall.Kill(pid, syscall.SIGKILL); err != nil { - t.Fatalf("coluld not kill journalctl with PID %d: %s", pid, err) + t.Fatalf("coluld not kill journalctl with PID %d: %s", pid, err) } go generateJournaldLogs(t, context.Background(), syslogID, 5) diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go index 9b8002f1176f..8adbc18959d3 100644 --- a/libbeat/tests/integration/framework.go +++ b/libbeat/tests/integration/framework.go @@ -33,6 +33,7 @@ import ( "os/exec" "path/filepath" "regexp" + "slices" 
"strconv" "strings" "sync" @@ -430,6 +431,29 @@ func (b *BeatProc) GetLogLine(s string) string { return line } +// GetLastLogLine search for the string s starting at the end +// of the logs, if it is found the whole log line is returned, otherwise +// an empty string is returned. GetLastLogLine does not keep track of +// any offset. +func (b *BeatProc) GetLastLogLine(s string) string { + logFile := b.openLogFile() + defer logFile.Close() + + found, line := b.searchStrInLogsReversed(logFile, s) + if found { + return line + } + + eventLogFile := b.openEventLogFile() + if eventLogFile == nil { + return "" + } + defer eventLogFile.Close() + _, line = b.searchStrInLogsReversed(eventLogFile, s) + + return line +} + // searchStrInLogs search for s as a substring of any line in logFile starting // from offset. // @@ -471,6 +495,44 @@ func (b *BeatProc) searchStrInLogs(logFile *os.File, s string, offset int64) (bo return false, offset, "" } +// searchStrInLogs search for s as a substring of any line in logFile starting +// from offset. +// +// It will close logFile and return the current offset. +func (b *BeatProc) searchStrInLogsReversed(logFile *os.File, s string) (bool, string) { + t := b.t + + defer func() { + if err := logFile.Close(); err != nil { + // That's not quite a test error, but it can impact + // next executions of LogContains, so treat it as an error + t.Errorf("could not close log file: %s", err) + } + }() + + r := bufio.NewReader(logFile) + lines := []string{} + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + t.Fatalf("error reading log file '%s': %s", logFile.Name(), err) + } + break + } + lines = append(lines, line) + } + + slices.Reverse(lines) + for _, line := range lines { + if strings.Contains(line, s) { + return true, line + } + } + + return false, "" +} + // WaitForLogs waits for the specified string s to be present in the logs within // the given timeout duration and fails the test if s is not found. 
// msgAndArgs should be a format string and arguments that will be printed From 794b83293203fa9d5eb2fb7f438cccd6fa97dd2d Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Thu, 24 Oct 2024 06:02:20 +1030 Subject: [PATCH 73/90] x-pack/filebeat/input/{cel,httpjson}: fix flaky test (#41358) Do not use uncontrolled randomisation in tests where it is not necessary. The retry tests in the httpjson and cel packages were using a randomised 5xx HTTP status code to trigger the retry behaviour of the the go-retryablehttp package. This has the unfortunate consequence of causing 2% of test runs to fail. The reason for this is given in the go-retryablehttp documentation[1]: Mainly, if an error is returned by the client (connection errors, etc.), or if a 500-range response code is received (except 501), then a retry is invoked after a wait period. Since the package is already tested, and is documented to accept all 5xx status codes except 501 to cause a retry, just use 500. [1]https://pkg.go.dev/github.com/hashicorp/go-retryablehttp@v0.7.7#section-readme --- CHANGELOG-developer.next.asciidoc | 1 + x-pack/filebeat/input/cel/input_test.go | 4 ++-- x-pack/filebeat/input/httpjson/input_test.go | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 01a7205e713c..0e64ed2754c6 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -106,6 +106,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Close connections properly in Filbeat's HTTPJSON input. {pull}39790[39790] - Add the Offset property to libbeat/reader.Message to store the total number of bytes read and discarded before generating the message. This enables inputs to accurately determine how much data has been read up to the message, using Message.Bytes + Message.Offset. 
{pull}39873[39873] {issue}39653[39653] - AWS CloudWatch Metrics record previous endTime to use for next collection period and change log.logger from cloudwatch to aws.cloudwatch. {pull}40870[40870] +- Fix flaky test in cel and httpjson inputs of filebeat. {issue}40503[40503] {pull}41358[41358] ==== Added diff --git a/x-pack/filebeat/input/cel/input_test.go b/x-pack/filebeat/input/cel/input_test.go index 1667fe7c2828..9e4fe746d76c 100644 --- a/x-pack/filebeat/input/cel/input_test.go +++ b/x-pack/filebeat/input/cel/input_test.go @@ -10,7 +10,6 @@ import ( "flag" "fmt" "io" - "math/rand" "net/http" "net/http/httptest" "net/url" @@ -1947,7 +1946,8 @@ func retryHandler() http.HandlerFunc { w.Write([]byte(`{"hello":"world"}`)) return } - w.WriteHeader(rand.Intn(100) + 500) + // Any 5xx except 501 will result in a retry. + w.WriteHeader(500) count++ } } diff --git a/x-pack/filebeat/input/httpjson/input_test.go b/x-pack/filebeat/input/httpjson/input_test.go index 4f09d8f057f9..1416efa3c788 100644 --- a/x-pack/filebeat/input/httpjson/input_test.go +++ b/x-pack/filebeat/input/httpjson/input_test.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "math/rand" "net/http" "net/http/httptest" "os" @@ -1724,7 +1723,8 @@ func retryHandler() http.HandlerFunc { _, _ = w.Write([]byte(`{"hello":"world"}`)) return } - w.WriteHeader(rand.Intn(100) + 500) + // Any 5xx except 501 will result in a retry. + w.WriteHeader(500) count += 1 } } From 0024b2ce327285e22b369ebae42eaa5e9e132f2c Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Wed, 23 Oct 2024 23:29:05 -0700 Subject: [PATCH 74/90] [add_session_metadata] Always use correct code for backend in use. (#41410) With the add_session_metadata processor, the config backend option and actual backend in use doesn't always match; the 'auto' option doesn't match a real backend (kernel_tracing, procfs). 
This fixes some logic so that when the 'auto' option is used, the processor will always follow the code path intended for whatever the actual backend is use is. --- .../sessionmd/add_session_metadata.go | 52 +++++++++++-------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go index a4646b6b6685..a01c80643256 100644 --- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go +++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go @@ -26,8 +26,10 @@ import ( ) const ( - processorName = "add_session_metadata" - logName = "processor." + processorName + processorName = "add_session_metadata" + logName = "processor." + processorName + procfsType = "procfs" + kernelTracingType = "kernel_tracing" ) // InitializeModule initializes this module. @@ -36,13 +38,14 @@ func InitializeModule() { } type addSessionMetadata struct { - ctx context.Context - cancel context.CancelFunc - config config - logger *logp.Logger - db *processdb.DB - provider provider.Provider - backend string + ctx context.Context + cancel context.CancelFunc + config config + logger *logp.Logger + db *processdb.DB + provider provider.Provider + backend string + providerType string } func New(cfg *cfg.C) (beat.Processor, error) { @@ -61,51 +64,56 @@ func New(cfg *cfg.C) (beat.Processor, error) { return nil, fmt.Errorf("failed to create DB: %w", err) } - if c.Backend != "kernel_tracing" { - backfilledPIDs := db.ScrapeProcfs() - logger.Infof("backfilled %d processes", len(backfilledPIDs)) - } - var p provider.Provider + var pType string switch c.Backend { case "auto": p, err = kerneltracingprovider.NewProvider(ctx, logger) if err != nil { // Most likely cause of error is not supporting ebpf or kprobes on system, try procfs + backfilledPIDs := db.ScrapeProcfs() + logger.Infof("backfilled %d processes", len(backfilledPIDs)) p, err = 
procfsprovider.NewProvider(ctx, logger, db, reader, c.PIDField) if err != nil { cancel() return nil, fmt.Errorf("failed to create provider: %w", err) } logger.Info("backend=auto using procfs") + pType = procfsType } else { logger.Info("backend=auto using kernel_tracing") + pType = kernelTracingType } case "procfs": + backfilledPIDs := db.ScrapeProcfs() + logger.Infof("backfilled %d processes", len(backfilledPIDs)) p, err = procfsprovider.NewProvider(ctx, logger, db, reader, c.PIDField) if err != nil { cancel() return nil, fmt.Errorf("failed to create procfs provider: %w", err) } + pType = procfsType case "kernel_tracing": p, err = kerneltracingprovider.NewProvider(ctx, logger) if err != nil { cancel() return nil, fmt.Errorf("failed to create kernel_tracing provider: %w", err) } + pType = kernelTracingType default: cancel() return nil, fmt.Errorf("unknown backend configuration") } return &addSessionMetadata{ - ctx: ctx, - cancel: cancel, - config: c, - logger: logger, - db: db, - provider: p, - backend: c.Backend, + ctx: ctx, + cancel: cancel, + config: c, + logger: logger, + db: db, + provider: p, + backend: c.Backend, + providerType: pType, }, nil } @@ -161,7 +169,7 @@ func (p *addSessionMetadata) enrich(ev *beat.Event) (*beat.Event, error) { } var fullProcess types.Process - if p.backend == "kernel_tracing" { + if p.providerType == kernelTracingType { // kernel_tracing doesn't enrich with the processor DB; process info is taken directly from quark cache proc, err := p.provider.GetProcess(pid) if err != nil { From 2d193a217a07c1adb2deae000bd6034003fba2b2 Mon Sep 17 00:00:00 2001 From: Chris Berkhout Date: Thu, 24 Oct 2024 18:49:17 +1100 Subject: [PATCH 75/90] x-pack/filebeat/input/entityanalytics: fix encoding of client_secret (#41393) In the Azure Active Directory provider, only encode the value of `client_secret` once. 
--------- Co-authored-by: Pierre HILBERT --- CHANGELOG.next.asciidoc | 1 + .../provider/azuread/authenticator/oauth2/oauth2.go | 2 +- .../azuread/authenticator/oauth2/oauth2_test.go | 10 ++++++---- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e583113b0ab4..91ccd8e5a490 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -169,6 +169,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Log bad handshake details when websocket connection fails {pull}41300[41300] - Improve modification time handling for entities and entity deletion logic in the Active Directory entityanalytics input. {pull}41179[41179] - Journald input now can read events from all boots {issue}41083[41083] {pull}41244[41244] +- Fix double encoding of client_secret in the Entity Analytics input's Azure Active Directory provider {pull}41393[41393] *Heartbeat* diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2.go index 1e134842dcc2..f4c38fc909cb 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2.go @@ -74,7 +74,7 @@ func (a *oauth2) renewToken(ctx context.Context) error { reqValues := url.Values{ "client_id": []string{a.conf.ClientID}, "scope": a.conf.Scopes, - "client_secret": []string{url.QueryEscape(a.conf.Secret)}, + "client_secret": []string{a.conf.Secret}, "grant_type": []string{"client_credentials"}, } reqEncoded := reqValues.Encode() diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2_test.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2_test.go index 1ec9d7dad45a..1d4da19292e5 100644 --- 
a/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2_test.go @@ -18,7 +18,7 @@ import ( "github.com/elastic/elastic-agent-libs/logp" ) -func testSetupServer(t *testing.T, tokenValue string, expiresIn int) *httptest.Server { +func testSetupServer(t *testing.T, expectedClientSecret string, tokenValue string, expiresIn int) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { payload := authResponse{ TokenType: "Bearer", @@ -28,6 +28,7 @@ func testSetupServer(t *testing.T, tokenValue string, expiresIn int) *httptest.S } data, err := json.Marshal(payload) require.NoError(t, err) + require.Equal(t, expectedClientSecret, r.FormValue("client_secret")) _, err = w.Write(data) require.NoError(t, err) @@ -62,12 +63,13 @@ func TestRenew(t *testing.T) { value := "test-value" expiresIn := 1000 - srv := testSetupServer(t, value, expiresIn) + clientSecret := "value&chars=to|escape" // #nosec G101 + srv := testSetupServer(t, clientSecret, value, expiresIn) defer srv.Close() cfg, err := config.NewConfigFrom(&conf{ Endpoint: "http://" + srv.Listener.Addr().String(), - Secret: "value", + Secret: clientSecret, ClientID: "client-id", TenantID: "tenant-id", }) @@ -90,7 +92,7 @@ func TestRenew(t *testing.T) { cachedToken := "cached-value" expireTime := time.Now().Add(1000 * time.Second) - srv := testSetupServer(t, cachedToken, 1000) + srv := testSetupServer(t, "no-client-secret-used", cachedToken, 1000) defer srv.Close() cfg, err := config.NewConfigFrom(&conf{ From 4140d15e57a61be38c739d60da8af2f0d8db3a45 Mon Sep 17 00:00:00 2001 From: Denis Date: Thu, 24 Oct 2024 09:58:21 +0200 Subject: [PATCH 76/90] Switch crossbuilding to Debian 11 (#41402) We're dropping support for Debian 10, so no need to crossbuild using the outdated image anymore. 
The old linker in Debian 10 caused a packaging issue with some Go dependency updates https://github.com/elastic/beats/issues/41270 So, this update should also help with that. This also updates the statically linked glibc from 2.28 to 2.31. --- CHANGELOG.next.asciidoc | 1 + dev-tools/mage/crossbuild.go | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 91ccd8e5a490..9603360efcae 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -14,6 +14,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Beats won't log start up information when running under the Elastic Agent {40390}40390[40390] - Filebeat now needs `dup3`, `faccessat2`, `prctl` and `setrlimit` syscalls to run the journald input. If this input is not being used, the syscalls are not needed. All Beats have those syscalls allowed now because the default seccomp policy is global to all Beats. {pull}40061[40061] - Beats will rate limit the logs about errors when indexing events on Elasticsearch, logging a summary every 10s. The logs sent to the event log is unchanged. 
{issue}40157[40157] +- Drop support for Debian 10 and upgrade statically linked glibc from 2.28 to 2.31 {pull}41402[41402] *Auditbeat* diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go index 972531c25a8d..ede35e08d8a3 100644 --- a/dev-tools/mage/crossbuild.go +++ b/dev-tools/mage/crossbuild.go @@ -221,11 +221,11 @@ func CrossBuildImage(platform string) (string, error) { switch { case platform == "darwin/amd64": - tagSuffix = "darwin-debian10" + tagSuffix = "darwin-debian11" case platform == "darwin/arm64": - tagSuffix = "darwin-arm64-debian10" + tagSuffix = "darwin-arm64-debian11" case platform == "darwin/universal": - tagSuffix = "darwin-arm64-debian10" + tagSuffix = "darwin-arm64-debian11" case platform == "linux/arm64": tagSuffix = "arm" case platform == "linux/armv5": @@ -235,13 +235,13 @@ func CrossBuildImage(platform string) (string, error) { case platform == "linux/armv7": tagSuffix = "armhf" case strings.HasPrefix(platform, "linux/mips"): - tagSuffix = "mips-debian10" + tagSuffix = "mips-debian11" case strings.HasPrefix(platform, "linux/ppc"): - tagSuffix = "ppc-debian10" + tagSuffix = "ppc-debian11" case platform == "linux/s390x": - tagSuffix = "s390x-debian10" + tagSuffix = "s390x-debian11" case strings.HasPrefix(platform, "linux"): - tagSuffix = "main-debian10" + tagSuffix = "main-debian11" } goVersion, err := GoVersion() From e44045051f5135f69308b135396e1e05dccbb629 Mon Sep 17 00:00:00 2001 From: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> Date: Thu, 24 Oct 2024 04:49:54 -0400 Subject: [PATCH 77/90] Add 0-1 scale for Metricbeat Docker percentage fields (#41341) Update docs and generated files --------- Co-authored-by: Denis Rechkunov --- metricbeat/docs/fields.asciidoc | 44 +++++++++---------- metricbeat/module/docker/cpu/_meta/fields.yml | 16 +++---- metricbeat/module/docker/fields.go | 2 +- .../module/docker/memory/_meta/fields.yml | 4 +- x-pack/metricbeat/module/awsfargate/fields.go | 2 +- 
.../awsfargate/task_stats/_meta/fields.yml | 18 ++++---- .../module/containerd/cpu/_meta/fields.yml | 6 +-- x-pack/metricbeat/module/containerd/fields.go | 2 +- 8 files changed, 47 insertions(+), 47 deletions(-) diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 915cfa33f0a7..09fd5e532450 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -5224,7 +5224,7 @@ Runtime CPU metrics. *`awsfargate.task_stats.cpu.kernel.pct`*:: + -- -Percentage of time in kernel space. +Percentage of time in kernel space, expressed as a value between 0 and 1. type: scaled_float @@ -5236,7 +5236,7 @@ format: percent *`awsfargate.task_stats.cpu.kernel.norm.pct`*:: + -- -Percentage of time in kernel space normalized by the number of CPU cores. +Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -5258,7 +5258,7 @@ type: long *`awsfargate.task_stats.cpu.system.pct`*:: + -- -Percentage of total CPU time in the system. +Percentage of total CPU time in the system, expressed as a value between 0 and 1. type: scaled_float @@ -5270,7 +5270,7 @@ format: percent *`awsfargate.task_stats.cpu.system.norm.pct`*:: + -- -Percentage of total CPU time in the system normalized by the number of CPU cores. +Percentage of total CPU time in the system normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -5292,7 +5292,7 @@ type: long *`awsfargate.task_stats.cpu.user.pct`*:: + -- -Percentage of time in user space. +Percentage of time in user space, expressed as a value between 0 and 1. type: scaled_float @@ -5304,7 +5304,7 @@ format: percent *`awsfargate.task_stats.cpu.user.norm.pct`*:: + -- -Percentage of time in user space normalized by the number of CPU cores. +Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. 
type: scaled_float @@ -5326,7 +5326,7 @@ type: long *`awsfargate.task_stats.cpu.total.pct`*:: + -- -Total CPU usage. +Total CPU usage, expressed as a value between 0 and 1. type: scaled_float @@ -5338,7 +5338,7 @@ format: percent *`awsfargate.task_stats.cpu.total.norm.pct`*:: + -- -Total CPU usage normalized by the number of CPU cores. +Total CPU usage normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -5702,7 +5702,7 @@ format: bytes *`awsfargate.task_stats.memory.rss.pct`*:: + -- -Memory resident set size percentage. +Memory resident set size percentage, expressed as a value between 0 and 1. type: scaled_float @@ -12042,7 +12042,7 @@ type: double *`containerd.cpu.usage.total.pct`*:: + -- -Percentage of total CPU time normalized by the number of CPU cores +Percentage of total CPU time normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -12054,7 +12054,7 @@ format: percent *`containerd.cpu.usage.kernel.pct`*:: + -- -Percentage of time in kernel space normalized by the number of CPU cores. +Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -12066,7 +12066,7 @@ format: percent *`containerd.cpu.usage.user.pct`*:: + -- -Percentage of time in user space normalized by the number of CPU cores. +Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -13632,7 +13632,7 @@ Runtime CPU metrics. *`docker.cpu.kernel.pct`*:: + -- -Percentage of time in kernel space. +Percentage of time in kernel space, expressed as a value between 0 and 1. type: scaled_float @@ -13644,7 +13644,7 @@ format: percent *`docker.cpu.kernel.norm.pct`*:: + -- -Percentage of time in kernel space normalized by the number of CPU cores. +Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. 
type: scaled_float @@ -13666,7 +13666,7 @@ type: long *`docker.cpu.system.pct`*:: + -- -Percentage of total CPU time in the system. +Percentage of total CPU time in the system, expressed as a value between 0 and 1. type: scaled_float @@ -13678,7 +13678,7 @@ format: percent *`docker.cpu.system.norm.pct`*:: + -- -Percentage of total CPU time in the system normalized by the number of CPU cores. +Percentage of total CPU time in the system normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -13700,7 +13700,7 @@ type: long *`docker.cpu.user.pct`*:: + -- -Percentage of time in user space. +Percentage of time in user space, expressed as a value between 0 and 1. type: scaled_float @@ -13712,7 +13712,7 @@ format: percent *`docker.cpu.user.norm.pct`*:: + -- -Percentage of time in user space normalized by the number of CPU cores. +Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -13758,7 +13758,7 @@ format: percent *`docker.cpu.core.*.pct`*:: + -- -Percentage of CPU time in this core. +Percentage of CPU time in this core, expressed as a value between 0 and 1. type: object @@ -13770,7 +13770,7 @@ format: percent *`docker.cpu.core.*.norm.pct`*:: + -- -Percentage of CPU time in this core, normalized by the number of CPU cores. +Percentage of CPU time in this core normalized by the number of CPU cores, expressed as a value between 0 and 1. type: object @@ -14431,7 +14431,7 @@ format: bytes *`docker.memory.rss.pct`*:: + -- -Memory resident set size percentage. +Memory resident set size percentage, expressed as a value between 0 and 1. type: scaled_float @@ -14462,7 +14462,7 @@ format: bytes *`docker.memory.usage.pct`*:: + -- -Memory usage percentage. +Memory usage percentage, expressed as a value between 0 and 1. 
type: scaled_float diff --git a/metricbeat/module/docker/cpu/_meta/fields.yml b/metricbeat/module/docker/cpu/_meta/fields.yml index 228b7224bc2b..09ab9dcbb2e8 100644 --- a/metricbeat/module/docker/cpu/_meta/fields.yml +++ b/metricbeat/module/docker/cpu/_meta/fields.yml @@ -8,12 +8,12 @@ type: scaled_float format: percent description: > - Percentage of time in kernel space. + Percentage of time in kernel space, expressed as a value between 0 and 1. - name: kernel.norm.pct type: scaled_float format: percent description: > - Percentage of time in kernel space normalized by the number of CPU cores. + Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: kernel.ticks type: long description: > @@ -22,12 +22,12 @@ type: scaled_float format: percent description: > - Percentage of total CPU time in the system. + Percentage of total CPU time in the system, expressed as a value between 0 and 1. - name: system.norm.pct type: scaled_float format: percent description: > - Percentage of total CPU time in the system normalized by the number of CPU cores. + Percentage of total CPU time in the system normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: system.ticks type: long description: > @@ -36,12 +36,12 @@ type: scaled_float format: percent description: > - Percentage of time in user space. + Percentage of time in user space, expressed as a value between 0 and 1. - name: user.norm.pct type: scaled_float format: percent description: > - Percentage of time in user space normalized by the number of CPU cores. + Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: user.ticks type: long description: > @@ -61,13 +61,13 @@ object_type: scaled_float format: percent description: > - Percentage of CPU time in this core. + Percentage of CPU time in this core, expressed as a value between 0 and 1. 
- name: core.*.norm.pct type: object object_type: scaled_float format: percent description: > - Percentage of CPU time in this core, normalized by the number of CPU cores. + Percentage of CPU time in this core normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: core.*.ticks type: object object_type: long diff --git a/metricbeat/module/docker/fields.go b/metricbeat/module/docker/fields.go index a313e328ed97..a18cbb5c6a98 100644 --- a/metricbeat/module/docker/fields.go +++ b/metricbeat/module/docker/fields.go @@ -32,5 +32,5 @@ func init() { // AssetDocker returns asset data. // This is the base64 encoded zlib format compressed contents of module/docker. func AssetDocker() string { - return "eJzsnEtv4zgSgO/5FYXsYYFGx8Yu9pTDArNJLzrY6UnQneweBgMPTZVtrilSTVJ2u3/9gKQk60FJfsiOM2gfcrDsqo/14qucG1ji5hYiSZeorgAMMxxv4frevXF9BRChpoolhklxC/+8AgDwD0EbYjRQyTlSgxHMlIyzZ6MrAIUcicZbmJMrAI3GMDHXt/Drtdb8+jf73kIqM6FSzNj8FmaEa7wCmDHkkb51mm5AkBhLfPZlNomVqmSaZO8EGO3rQcykiol9G4iIHDDThlENZCpTk4n9qwaVCsHEHKgUhjCBSo8yKWWaMlHxyeJJCKwDrmTIQhbEaBSjhXL7qpoxf9WxqmhxTERUeZbDLXGzlqr+rAPRvu68QDALYmBNNOA3pKl1ORNgFtgYxyjMpZAYDHNFxOB+UPfEIKwX6Am2JrR8maYwho2CVA9pnVy1lxzWypIJiSKFWmNYN0sOVfvwBIXoliGz73XrhmN1v+Gy7xiKWGiJzzKRktJMZnVLbMG4FPPAwx4293qWhnAPJ2dAOHcBMmMcdR6vLYFaAVyfgu1LRrUlcjm1ICuEKaLIIxekArogYo4RaCYo+gdMirCDDZkPGNEPMZmjkzlq1r0kPabifU6FYTHC3dPLMMVuiUogHyXUBMevKeEYTWZckvoH/NxwCwkqiqL+tMdET/5L1k7WnXZITGQwoBNCMeyoDFdIFV8gM1guwtl3jGC6cVEq0niKyn7BuoxK1VZjspEZRpfhUAykTV+peXoBJ2832+qNNvjqZnXVx5N7A1srZmhd2JcQEh3sx4RGNsKhQyMDc2LDilON6rVtmlnSonQFr0O9hBho8B7jeTeqk5WEPpu6eD67PZ+LLEo1mXeivYq/a3zHuNc+Gr1rHYGc/h8bj/ybk3PGdLWiMe25u0bU6ZiLHtb7AfzZnrD9Q98/pX+p0BXJHXBUcRrA9JLJozbeTC/hYfw4zBpUIQnvag/YXv1EaRqn3G0CrFwNUaqYmDtHcjYrtg+hA4g20DKsTE6x69o68SDoLd50Yxob5F7APKnavrzDAP5lv+rgD2dXzUOMXvS9bEtTpVCYzMaJnf2QysZRT2nlhWrFKE5smTgBmZ9KXA0yMlcGD4+g8GuK2uj3NpMFEdJzNn2Tg64JM2egzLlAJ9aOVqt1NRPwNcUUtY2kfBw7s7uvNn0wFPi2entFxSCCtWitWCMIBylGTvCbq0aHUV9KObL0BsUbqEiZnX+UpB8lqQGj0zgmanO6FRIRb7U82SWoTFC5o
903W6bcqil3wtuoVyWj/6hZP2pWBQRX1Y35ode6dTn7byyHvqr8YIlCUotbygFvjb0yFvlLWrIijJMpx6DemZLx4MOUqaJhdVb2cOqeF+i+bmPLn4UAxsy1O7iyV4+DLQehVuZpSDq1ynoJPmI2bgrrm1UbUdY37B048uE/3OezzW6uqBjGGMWmaddUGjwAg/oh2FGj+C9RTKbaChmvCE+xxFUd23uIMEER2dFJAczoamTn41og4WZBF0iXA5S1krTm8VnlCx9Ln4yIIbBmnIMUfANT3FYE3zgU1XpItK0bCu1wK0Kzz/3+8cNPPz9/vPv44e4/vwMT2qjUZRMsiPZ36anGyE6o05TxyJkt+y6La+fy+1fmGWGcibk2CskymEtMGJw3ljo9/qdS0NQtUKwCjKDutNPNDWVnedlAZRSun6E0OriCOGGZsfdtI0ERTQKtQ9DVV7QDUt0eKKKwpJIzlDkHiVPUzSJTk6ShEjVAbSqjtOgpXPONmUkjgsok4Qw5zCiNcC3mGpvrA1Q9J2eY64KWRdYBqWMnPA/GycaWTBahMGzGmp1NfZmUbYxOEzbwItjXNGfdQsKcrWyhTrLZK9zkVMZMyAkpH7Zg2TzrgN8DmwEzNqLdXsxNV+sFowu/q822lH5wEVNIDd84hSjqJe2EvZCuQ9PuzoqmSE/U3xA5YHegbx1zzXc+JPeNwxVTJiX8BNvASvNdYwlQpVA4TzkJlaaB2xMtC+EcKKELjDyWBqK1pMydcBnZDLKW5VYOz8kU+aH3t0c0DHq9PXDn61RkYnbUHfGDmMm84MOU2MWkXV0ak+jb8TiSVI/8enJEZTxGMWcCxwpnqFBQHJOEjf3zicJYGpyQhE1Wfxv9/R/jv4wjphNONje+h+lmzSK8Ydt29WMbwPM19FBp/bhC5cK00uu8d3InxK7JT5BV9SMeryjQzt9kylr/zwDV/iODJpU2MknOYqpM005UrmvpDExupu0y1SnOq7I1Sr7LldoEl1NhDle3gyz7N8S0WsNraVa6GGNZuV/Zu9Z9chKGWd76yvDuwPknYK7S80lMkoSJefbh63fX+5n2M1ln1sp+veTWcm6CddbS2dORfeo2KGpGWg4RqYxjNtgu+M5JM65pyx30CPgfE5Fc16Oqr8YelKMDXAX5qA0LKOp/87DkHGhPSJaZu3oNXKAqtiIGJ2upljbgNJpRyLSt7F3cPcyZbsh0g0bTyzsjjI+oTFvOZTo6Entg/k2YnfdTmwvhIsxZWx4Ma5asSDl1YRKlB1vxfP7ypVIp9l3qvG4aZuQKtZvCbAS5LUfHxjp4qt0bPNDb0roj96cW4lxqaye1a2Ieyusv2p/yHO73mHx7Ba9/It9y6kDTOVyen33reZtv4dISqWbUxgpMoLHF+pgl2C9exOFrsPDCNLiEOWqdnIMWop2qlvV5+HLzkIMlQWVsJ8PM1Nn6bdu8sW+ivlbLTDEQvwzZDqBj3xWp7t1gT+LuQHXvNQDL6RJCl9hp4NJRv1KycdYAA+wLPzjB7m7zEK7ssycAe9gVp3QpM1QmPKZmLv8MmVAMpJ4Jl5AIMoe7sEQ4iOt0ifC4K852RpjKtOWfUww3LfjfzFf/MYS7Oq3fgVxudtRPgVhl3jg8RYY7pYta5oxXyA+Phq3TxWukRqsDg0yliWLQBGmZLf6ECSIr08klJUi9Zl9AguyOdL4EaWeqbbMmzV8UhPOj2DhN0ZDdtmHtKjpbOpJ9Drz75rSn/LwtfNRn6JDanu961KXRkOpe7vvVTTgzOLDOn5nBbsWMxoN68e5Tz0jtX//fC5KWe6wD7o4W6P63EjxtG2GLCv8eqFQKdSJ9z6iRME6UpONfExb9NhYY7hfecg6KuQXMp6ZCkW/dzH69vjO+diP4IwAA//+0neAX" + return 
"eJzsXE1v4zjSvudXFPIeXqDRbe8s9pTDArNJLzrY6UnQneweBgMPTZXtWkukmqTsuH/9gqQkWxJl+UN2nEH7kENoVT1VrC8WS/4Ac1zdQCT5HNUVgCET4w1c37l/XF8BRKi5otSQFDfw9ysAAL8I2jCjgcs4Rm4wgomSSb42uAJQGCPTeANTdgWg0RgSU30Dv11rHV//bv83k8qMuBQTmt7AhMUarwAmhHGkbxynDyBYghv47MesUktVySzN/xPAaD/3YiJVwuy/gYnIASZtiGtgY5mZnOz/a1CZECSmwKUwjAQqPcipbKLZRFR+s1wJAdsCbkORJS1I0CjiJXP7qaqx+NRhVaElCRNRZa0AN8fVUqr62haI9nPrCYKZMQNLpgFfkGd2y0mAmWFDjkEYl0JmMIwrYgb3A3XHDMJyhh7BWoUWX84pDMNaQab71E7B2lMOc6V0xKJIodYY5k3poWzvH6Ek3SIyfa9rN2yr+4lL3zFksdBin5uIlJRmNKlrYg0slmIaWOzA5j5P0rDYg5MTYHHsDGRCMerCXlsMtQJweQpsX3NUa0TOp2ZsgTBGFIXlglTAZ0xMMQJNgqNfICnCG2zYtEeLvk/YFB3NQTPupdkxEe9LJgwlCLePz/0EuzkqgfEg5SYov+Ysxmg0iSWrf8HnhhtIUXEU9dUOFT36h6ye7HZakUjkYECnjON7wJfUOWUETAODBYszu8tmaTf6Ly4j/RTezlwoIVVygZKBxcVi+o4RjFfOlkWWjFHZB+zGcqlQ9yC/IT4Pm3XABbvC1uMzOHp1aVoi5kobfHXlu0jmkfttsLr20I5Rby7cJZjXFglPb2a5Hvo2sxy+IxtmnGlUr635XN8WyvEBywl0CfbUkOr0VuRkP1moWkvSkvmtB51d60+l32aaTbdCexWrqOHbzQhaTi1S4eBdqwRy/F9sLPl/js5p+dUYStrhPsasc7m3bt9FC396z89V1O773frZPzr8WpGhjBOl2M2SPSI9J3lUn4L0HO6HD/2U7ApZuAlwwGn0Z86zJIvdmcnS1RBlisTUbXdMk/K0FerXtAHdBCvTUxxS15t4EOg1vPHKNPoJnQALz2t7eAcB/mEfdeAPx66aPZ9O6HvplmdKoTC5jlObSJHLRmdsoyBEtSCOIxtLToDMZyUXqIwsmMH9Ayj8lqE2+r31ZMGE9Dibe1MAXTIyZ0BZ4AKdWj1arnarScC3DDPU1pIKOXbG7h5t7kFfwNcx3jMqhQjGoqWihhH2Eowc4TcXjQ5DfSnhyKI3KN5ARMr1/CMk/QhJDTA6SxKmVqerkGxF+zbDky1BZYrKdcLfbJhyVVOxCW8jXm0o/UfM+hGzKkBwUT29H3oLXqez/8Gy75vdjxZRiGp5qdvjJbtnRpG/02YLRjEbxxjkO1Ey6V1MmSkeZmdp98fuaYbucWtbvhcCmJCbDnFhr24HaxyMW5qnQbKVq6yH4COycZNYV1ZtWFmX2DvgKMS/vyuyzW5bUVGMMYrG2bZUGmyAQb0JdpQU/2aKZKYtkaHv3a1xVWV7DxGmKCIrnRRARlctu5Brhiw2Mz5DPu8hrG1Qa7bPKg982vhmxAyDJcUxSBGvYIzriODnrKLayI22cUOhFbdCNP/eH58+/vzL06fbTx9v//UHkNBGZc6bYMa0Hz3INEY2oY4ziiOntvxZSmot/v0j84RRTGKqjUI2D/oSCYPTRqnTsf9cCp65AsUywAjqm3a63LC5WZ42cBmF42fIjQ6OII5Yrux9p25QRKPApBVsG8PaAVJdHyiiMKWNzVDmHEgco+1YZGbSLBSieohNm1Ba+JRb80Jm1LCgTSRhDzlMKQ1zLXON9fUeop6j0891QUuRdYDr2ITngcVsZUMmRSgMTag5CNblSfnB6DRmA8+CvmUF1jVImNLCBuo0z17hmbBNmCk7Icr7NbA8zzrA74EmQMZatDuLuXS1nBGf+VNtfqT0wkWkkJt45RiiqIe0E46OuoF
WezorZ0g9ou750R6HKf2knZtV9Ca5rx0uSJmMxSc4BlZmFRslQBWFwmkWs1Bo6nma02JhcQyc8RlGHpYGprXk5DpcRjaNrKXcKsDHbIzxofe3R8xXer4d4M432ElictQd8b2YyCLgw5jZYtJWl8ak+mY4jCTXA19PDrhMhiimJHCocIIKBcchS2no10cKE2lwxFIaLX4a/PVvw/8bRqTTmK0++NGqD0uK8AOtp/uPnZcvaui+3PphgcqZaWU0fG/nTpmtyU/gVfUWj2cUePuhiSl/U+IMoNrfyWii0kam6VlUlXPaCZUbgDoDJpdpt6nqFP2qvEYpTrlSm2A5Fcbh4nYQy/4DMa3a8FyakS7BRFbuV/aOdZ8dhX7KWx8Z3h2YfwLq2lgfJSxNSUzzL1+/u95PtV/YMtdW/rKXq+VcgnXa0vnqwK66A4qasJYmIpdJQr2dgm8dNeNGu1yjR8B/SERyWbeqrhh7kI/2cBXkrTZMoIz/zWbJOaA9Ipvn29Wp4BKqogUzOFpKNbcGp9EMQqptxb4NdwfmnDfkvEGj6cQ7YRQPuMxa+jJbxhY7wPyTkc37mfWFcBCOqc0P+lVLHqQcuzASpXureL58/VqJFPuWOq/rhjlyhdqlMGtB7six5WAd7Gp3Gg90zr3uiPtzC+KCqjuHHzPTXm0DwTG28ax9L+hw60jYyyvYxmf2UqAOTLnD5VmDn3U/zgLg0pyypvpGNSfQ2MB/TDn3qydxeD0XLnKD5dBRNXcBtCTtWLXU+uGL0kOaVILLxCbWXNV5LbgeBNnXnV9r/KYUxJc0awG2nOEitf1k2eHeO6C68xyACnQp43PcquCNawOlZKNvAT2cMT86wu6e9BBc+XdPAOx+VzgbFzx9ecJDZqbyz+AJpSB1T7gER5AFuAtzhINwnc4RHnaFs84IY5m1/C5If2nB/1xB9Tc53DVs/T7lcr2j3lGiSt443EX66/hFLTnjFfzDQ8PWdPEartG6gUFMG4miVwdpyRZ/QgeRlXRySQ5Sj9kX4CC7Qzqfg7Rjqh2zRs23E8L+UR6cxmjYbsewdhZbx0PSfZrnXTntsejdhduGhvfJ7em2g10W9cnu+a6b3Sgmgz3z/IUMbmdMPOl1F28/d0hq//ofVUhb7sQOuIeaoftZK3hcD9WWEf49cKkU6lT6+VMjYZgqyYe/pRT9PhQYnj1e4+wV5hpgkZpKRn4MNH9ffmf42knwvwAAAP//GRdgEA==" } diff --git a/metricbeat/module/docker/memory/_meta/fields.yml b/metricbeat/module/docker/memory/_meta/fields.yml index 0ebb64ba9758..62bc1681fda1 100644 --- a/metricbeat/module/docker/memory/_meta/fields.yml +++ b/metricbeat/module/docker/memory/_meta/fields.yml @@ -53,7 +53,7 @@ type: scaled_float format: percent description: > - Memory resident set size percentage. + Memory resident set size percentage, expressed as a value between 0 and 1. - name: usage type: group description: > @@ -68,7 +68,7 @@ type: scaled_float format: percent description: > - Memory usage percentage. + Memory usage percentage, expressed as a value between 0 and 1. 
- name: total type: long format: bytes diff --git a/x-pack/metricbeat/module/awsfargate/fields.go b/x-pack/metricbeat/module/awsfargate/fields.go index e7a68db138f6..fd22574c8a59 100644 --- a/x-pack/metricbeat/module/awsfargate/fields.go +++ b/x-pack/metricbeat/module/awsfargate/fields.go @@ -19,5 +19,5 @@ func init() { // AssetAwsfargate returns asset data. // This is the base64 encoded zlib format compressed contents of module/awsfargate. func AssetAwsfargate() string { - return "eJzsWt9v2zYQfs9fcQgGdGsb56XYgx8KeG6KBVjSIEnXR+VMnW3OEqmSVF132/8+kKJsWT9sWU4yJ4gfLfLu++6++ySLPoEZLfqAcz1GNUFDRwCGm4j6cDz4cgMfs2+PjwBC0kzxxHAp+vD+CADgbrXvDmIZphEBk1FEzGiw2/1FiMkozjSMlYzBoJ7ZbzBEg0AiTCQXpncEoCgi1NSHERk8AhhzikLdd7lOQGBMJaj2YxYJ9WGiZJr4b4rbiluZFAa5INWLcESR7jEZBxjjDylwrgNiOmBRqg2p5dY8/IwWc6nCwvdr5Tgb3sAw2+pydUqdLzlZi7ADgmWEzhhsa05QiS7ZXVsH15ed84Y05oLbgCdjjHm06IxiFQlKkfbA9I2U5rJ7aQqgfKhsdQWbXR1og0ZXchVVXsn0vnAB4G4V5i6nq8FMV8NopmhgTopAM4UJhdl4FufWAXcx4Ke/z4Y3wfDT5e3g/PLsOrg4ux18GNwOgs/X58Gf7/49tWtPs7W1w51/ykOef8pTu9aubLaC0mRsakKlPMX5hJ+veJKQkr/UpnOl2yfXsumVIHkKHpIwfMzXvGanHO9LFwGGy/lfRQdkSmrt0GhAEea11G9hPuVsCvQ1xUiDkYWhcDV6A69OX8Gbwtc87DXXKyTNFYVOcqm+P1a3UwIfG7LYMJbKKdmVOBOtG11b9w0IZ0LOxYPgc5G7o4splmoRTFGFQcRjbmrRaYYRhcE4klhe0ALi76hCuHCJ4A+bowNOlqS1yMq2VIF0nQrDY4Lh1efcfnql9XXjX8w9IyUo6iWszL1VgVwGqWI0fUhIMRJ1K7aU0X6uss04IZBjcKS48OBAJ8ioTKxCQUgVPwEeYHFixH9QCKOFU4pI4xEpu8E2kklFlTZW2BrOZuVpW1GNpJh0I2ARuNjt668X2tDBlV4ajDybrAm20h7qNiqHKKUNfPaVlGf9kJLyQF2KZiCpJnVodffVttC2DYKDf4jaqXDYVzGO6aNYUJu6u9n432t+u5zQVONkK9yD0EkJcxdZ5KxCrmdcdnuO+cD1DM5PP3V9iFGE5afLzdlblmfAWBqnERoKXQ4NYaq4mLjKRHycTdeUVs/ydY1qAF8kIJO6QYJtw9SShv1cLvvYmcgK7mhhqDPgXLibgrQk9ZsN4Qjtx0etXnvtTGfn+rNUKRLG9yGx9kZMijoBF27RpL5xRoG18gdGmnmCu2cYmSeG80+g6GtK2ui31pcFCpnhru9hDnyO3Dwy6hwn6MTW2SKw0uACvqaUUvZL0vPaiYvbXt+n+yayst0s6ZLURg9svhtvuZmElChi1uf68GvvXTe37CDxpUoUr53Ae3Vwl+RZWHh3Jofq4ZaRIfEEbdz34sXHX3y8hsjuPp7p6cCMfLvIlwJP4xjV4nGexlE8J1e3P4BkQgrtlmfl7u4JPW/W07T5QmNerP7F6muI7G717kXQgTh9xVAbVL5+ttbtdY8/Luv4sqd8hL+eXI7+otqXatmFYGPPC
2uCGJOEi4nfcPz6uFuFr3Hua+UP/LP/6lizc3XS/mrPXgUuDKkxsurorf5hEVcPMuE+bq9DF9m4V39WAFLAFy5COa8rdpt7Z5O84ZHvQdl4NgfK8SaEswOAe0U4821u1YwlfMW/oaFgLtXMCleT6W12mAY+27i04OGxgMcCmkwrDmPkUY/JtPZ9dktb3AruI/IIXBJSzW/p6/4wAI9QOm+OLn0zOqWbLXAPD7i+uVlzqyqApzj6npEi7f7AY9UImv+oOaIpcqg/m4G2QoRWZzQ7cLloYJFH33jm5I53HkIxn9250X1oJsbvB6CYC/yes2k4xoND13kN7hyvIGMtufe6x8VIppVfMM1y2PZU1kTi/uzyn1r25WdvLpiM7S3HxWweiFDJJKn9ZdD++LodIp9phSxBNqO6OcmxkVJS7XeyvglaFt7ejttD8gseoVybMVWFLFPzouQXJT8JJf8XAAD//5wYkBM=" + return "eJzsWl1v27gSfc+vGAQX6L1t49wFin3wQwGvm2IDbNIgSbePypga21xLpEpScd3d/e8LUpIl68OWZcfrFNGjRM6cMx+HEqkzmNGiDzjXY1QTNHQCYLgJqA+ngy938DG5e3oC4JNmikeGS9GH9ycAAA/5vAcIpR8HBEwGATGjwU5PH0JIRnGmYaxkCAb1zN5BHw0CCT+SXJjeCYCigFBTH0Zk8ARgzCnwdd/5OgOBIZWg2sssIurDRMk4Su8UpxWnMikMckGqF+CIAt1jMvQwxO9S4Fx7xLTHglgbUsupmfkZLeZS+YX7K+G4GN7BMJnqfHVynQ05W7GwBYKlhc4YbGrOUIku3l1aB7fXnf36NOaCW4NnYwx5sOiMIrcEJUs7YHokpbnsHpoCqNRUMrqCzY72tEGjK76KVV7x9L7wAOAhN/OQ0dVgpnkzmikamJMi0ExhRH7SnsW+dcCdDfjPnxfDO2/46fp+cHl9cetdXdwPPgzuB97n20vv93d/n9ux58nY2ubOrnKTZ1e5a1fSlfSWV+qMdUmohKfYn/DfGx5FpOT/at250O3ia5n0ipHMBfdJGD7mK1qzlY/3pYcAw2X/59YBmZJaOzQaUPhZLPVbmE85mwJ9jTHQYGShKVyM3sCr81fwpnCb+73mePmkuSLflVys98fqfkqQ2obENoylcpXsQpwUrWtdG/c1CGdCzsWT4HOWu6MLKZRq4U1R+V7AQ25q0WmGAfneOJBYHtAC4q+ofLhyjuA366MDThbFtcjKslSBdBsLw0OC4c3nTH56pfF17V/0PSMlKOhFrMy9VYCcB6lCNH2ISDESdSM2hNFeN8lknBDIMThSXKTgQEfI6C3Qt0iR1uQDakB4xCAmq3ZzIgH/d034U5l+haiQKnwGbMHixIB/Jx9GC1dPIg5HpOwEm24mFek9xcRwNit3bh6QQIpJN5oWp7NdZteMSC+0oaNLkDQYpGySVNl8JFB3TUFK+BjLcg3rw5RnGpunLM+UjnPRDCTWpI4tO2lOLLT9CKQjeYx1WGF6mOpz8TiINObMmtG4bvzXM3O/1IRY42TnkktIHUXNlZg9XYll3H2uZ1x2e+/7wPUMLs8/dX3pU4Tlt/H13lsGccBYHMYBGvKdDw1+rLiYuPgFfJz085Tyb5+6dDaALxKQUV1TwqbGbEnDXtfLbHcmksMdLQx1BpyV9zojLUn9Yk04QrvxUfk24dZ0to4/i5UiYdI8RFYqiUlRV8CFVwdSj5yRZxePJ0aaKIdbpYzMHMPlJ1D0NSZt9Fur8QKFTHDX5zADPkduDow6wwk6snG2CGxpcAFfY4op+fJOeW3FxU2vz9O+ieTinDhdklqrgc0r+4Ylx6dIEbM614efe++6qWWHEl9WieK1HbhXBXdOfggJ787kWDXcMjIknqGMp7l40fEXHa8hsr2OJ/V0ZEK+uciXBR6HIarFYd7G7efIj6Pq9gNIRqTQTvmh1N29oWfJep4yX0jMi9S/SH0Nke2l3
m0XHYnSVwS1ocpXzyK7bfekx4sdN3vKvzysOpejP6h26y154K3NeWGMF2IUcTFJJ5y+Pu0W4Vucp7FKf5BI/m2yYufipNOnPfsUuDCkxsiqrZf/kRJWD35hH8vr0Fk2boPQFoAU8IULX87rgt1m7WwqbzjwGpS0Z7OhDG9EODsCuDeEszTNrZKxhK/4Ixry5lLNbOFqMr31CtPAZxOXFjxSLJBiAU2mFYcx8qDHZFy7691SFjeC+4g8AOeEVPNeft0PFnCA0KXi6Nw3o1O6WQJ30IDbu7sVtaoCeI6tnzJSpN0PT7YaQfPvNadTRQ71JzjQthCh1UnOFlyuGlhk1vdwfuWOip6irj67M6h9VFaI346grq7wW8bGxWx9JR1lN9TgzvAKMla4e697XIxkXPnOaS6HTe9uTST2J6p/1bIvv6FzwWRoFyZns7khfCWjqPb7of2BeTtEqaccWYRsRnV9kmEjpaTa7Sx/HbTEvF2020NKBxwgXOsxVQtZxualkl8q+VlU8j8BAAD//26nA2k=" } diff --git a/x-pack/metricbeat/module/awsfargate/task_stats/_meta/fields.yml b/x-pack/metricbeat/module/awsfargate/task_stats/_meta/fields.yml index b79be57b70c8..e9d6a6a5be91 100644 --- a/x-pack/metricbeat/module/awsfargate/task_stats/_meta/fields.yml +++ b/x-pack/metricbeat/module/awsfargate/task_stats/_meta/fields.yml @@ -34,12 +34,12 @@ type: scaled_float format: percent description: > - Percentage of time in kernel space. + Percentage of time in kernel space, expressed as a value between 0 and 1. - name: kernel.norm.pct type: scaled_float format: percent description: > - Percentage of time in kernel space normalized by the number of CPU cores. + Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: kernel.ticks type: long description: > @@ -48,12 +48,12 @@ type: scaled_float format: percent description: > - Percentage of total CPU time in the system. + Percentage of total CPU time in the system, expressed as a value between 0 and 1. - name: system.norm.pct type: scaled_float format: percent description: > - Percentage of total CPU time in the system normalized by the number of CPU cores. + Percentage of total CPU time in the system normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: system.ticks type: long description: > @@ -62,12 +62,12 @@ type: scaled_float format: percent description: > - Percentage of time in user space. 
+ Percentage of time in user space, expressed as a value between 0 and 1. - name: user.norm.pct type: scaled_float format: percent description: > - Percentage of time in user space normalized by the number of CPU cores. + Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: user.ticks type: long description: > @@ -76,12 +76,12 @@ type: scaled_float format: percent description: > - Total CPU usage. + Total CPU usage, expressed as a value between 0 and 1. - name: total.norm.pct type: scaled_float format: percent description: > - Total CPU usage normalized by the number of CPU cores. + Total CPU usage normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: diskio type: group description: Disk I/O metrics. @@ -244,7 +244,7 @@ type: scaled_float format: percent description: > - Memory resident set size percentage. + Memory resident set size percentage, expressed as a value between 0 and 1. - name: usage type: group description: > diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml b/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml index e2a99f3d4329..3e3a1d24caa6 100644 --- a/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml @@ -36,17 +36,17 @@ type: scaled_float format: percent description: > - Percentage of total CPU time normalized by the number of CPU cores + Percentage of total CPU time normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: kernel.pct type: scaled_float format: percent description: > - Percentage of time in kernel space normalized by the number of CPU cores. + Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: user.pct type: scaled_float format: percent description: > - Percentage of time in user space normalized by the number of CPU cores. 
+ Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: cpu.*.ns type: object object_type: double diff --git a/x-pack/metricbeat/module/containerd/fields.go b/x-pack/metricbeat/module/containerd/fields.go index 9cc2c58832fb..a7506778b51a 100644 --- a/x-pack/metricbeat/module/containerd/fields.go +++ b/x-pack/metricbeat/module/containerd/fields.go @@ -19,5 +19,5 @@ func init() { // AssetContainerd returns asset data. // This is the base64 encoded zlib format compressed contents of module/containerd. func AssetContainerd() string { - return "eJzUmE1v8zYMx+/5FEQvAwY0u+cwYC1QoBi6Fdt6LmSZTrXoxdBL0/TTP5Bsp44t20qeBnF0KAq/kD/Sf5JSbmGDuxVQJS1hEnW+ALDMclzBzf3+4s0CQCNHYnAFGVqyAMjRUM1Ky5Rcwe8LAICvF8BYYg1QxTlSizkUWolDLwVDnptVePEWJBHYwfDL7kpcwVorV9ZXIm79epSF0oL4y0Bk5Z8Zy6gBkilnW6Z/MaCdlEyuvy6aZW2pTdUm839NSSju7zRwG9xt1R54BLGTob7FxlfGN0z1/LST4Ff3eyR4v+OKbuDxt79BoNWM7qOORd4myvGdHYQ+Fv4EhF9/EYGgCsgCUMR641gj6ZqOZyPB6R+UOuE48XL0dg3kTnsV2DcEzopA5P/fq6JjIpahNqwqTe9ew8uVXEduTiCHXDmRofZsJ0G3ZLWzeDRgVVXDLycEcOdfDfDHsTfcW81sXHs/KYJg+OpUcBr1XGTg6S3K05RgnBBE787XEPzkuE5V+KauStRhAl6tOkKTaD7CUTLpDVFauskRmjao/3HSMoFw//wSm5tDc3hsnpqdsSiWVlnCo2rOlct4t+lNpPE/bw2cQV3tgIKPQB3wTYnSApNgkCqZH8TwReYMWac32ynVb1BL7EY4bnLMbNu0jAkNxhPYrAQ9+uVT92eIoEoLSCJVnb3BkH3+rzngF6+f9HBjEr6qeG1dNUcFvCypHQzaUMIxfy24IrGHmmZZoqYoY08k0D9XL3to3xJDDPtKl94DZ5+YQ7YL/VLu54R/iCodadOHNTu3CH1cTNZwEM5NaXEuRyt1pmGGJv4tQdLSLX9dRmupClJl/2M0B9WN14lqSwjRU/bqy0dp35gJgL3hLVCog33etx2Be5bHxvRW6Q2Ta4M2opNJjYzrYyJxT4GzIQCDtrFD1hgf3dp0v/Lgpm1sw5a0y6jSCBoNy/22wvMZ9jlARqhl7/jAeM/Z+Qgrn1AwjpWROBol9C2+3zkHVPA2hsPkJXLVeJ3M1nG7wwnfL6En1EoKvxd229jU9lKQjwscVZ7IR0Md8jHcfC89XOpK4FzRcMitqYcaCSTv686X3IP2MpHegjC+pMpF05OU5QSgB8I4BCeoh1E4E2yY4oxabCWqgogXbvQgdnLl1oei6yvdBjy5gi9VBjWonVs11FzFrIviUJ4jVWG2pKv8k2vi3y0pr7AiKuzZ10PAnF01BKp510Jblp1KWPwIAAD//wv4XH8=" + return 
"eJzUmMtu6zYQhvd+isHZFCgat916UaAJECAo0gZtsw4oauSwpkiBlzjO0xdDSY4sURe7dSxrcXBgWTPf/P5nOMoNbHC3Aq6VY0KhSRcATjiJK/h2t//w2wLAoERmcQUJOrYASNFyIwontFrBLwsAgM8HwDrmLHAtJXKHKWRG54dZMoEytavw4A0olmMLgy63K3AFa6N9UX0SSUvXg8q0yRl9DEyV+YV1gltgifauEfo7C8YrJdT680O7rCI1qZpk9K8tGMf9nRpug7ut3gMPILYU6kascyVyI3QnT1MEutq/x4Tst1LzDTz8+Afk6Izg+6pjlTeJUnwTB6UPlT8CQdfvLEfQGSQBKBK9TmyQtUPH1ZiQ9FfOfe4lIztSXAupN+QC94ogRRaI6P97V7RCxBRqwurCdu7VvFKrdeTmCHLQyucJGmI7Cbphq53DowHLrup/eEIBt/RogD+OvebeGuHi3vuPJgiBr84Fp1HPxQZE71Cd5gTr85yZ3fkGAp0c1+kKGuq6QBNOwKt1RxgS9Y9wlE06hygv/OgROu2g/tMrJ3KEu6fn2LnZdw4Pnad2Zx3mS6cdk1E3p9onsj30RmT8m6KBt2jKDSjkCNQB3xaoHAgFFrlW6UENn2TesvX0YTvm+g0ahe0Kh0MOhW2GVjGjwbCA9TXBj3SRdL+FCkpZQDGlK/V6Syb9r7ngZ/LP9HJjFr6qel3VNUcVvCy46y3aciYxfcmkZrEv1cOyQMNRxb4xgf6pfJigaSSGGvadriiDFB+YQrIL81Ltzwn6EtcG7Q+A74VBazEFZoHBG5MeaYhtERX8FGbIz8uR1p6bEFS+UBUchNerr5GD2n6mYoQT4Qul4IVffr+Mtm8phU7+wahS5Y2XkQafIATV0mlp0sK9ChsAO/tCjrk+WC3/t7fuTuShzWCrzUaotUUXcdOok4ZdNCLcY+CsCcCiq+OwNca3BWPbv3Lvnji0I05abEoZwaAVKW0yxGfFRw8Z40684b2QnWTnIyxzQiYklkHiaJzx1/iKdQ6okG0IR6hLaFVnHVXruIV0JPdzmAmVk8KfKNtjbGyjzdn7Bd6OHtl7TR306B++lz6Cqk6QUvPwXl1R9w0SmLxKnk/cg/EyIm/GhFxy7aPyTFJ5AtA9ExJCEjT9KFLkop/ijF5sCFVCxBs3+u53cudW72HX17o1+OQOvlQbVKBubt1QcWWzbopDew50hd2ytvNP7om/tqy4wo4osWffDwFzdt0QqObdC01btjph8W8AAAD//wJ7gxo=" } From 5fdfa2c46a1aff2515fa1e0a8c3f1c69df4f479e Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Thu, 24 Oct 2024 11:33:28 +0200 Subject: [PATCH 78/90] Make ETW input GA (#41389) --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/docs/inputs/input-etw.asciidoc | 2 -- x-pack/filebeat/input/etw/input.go | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9603360efcae..1c497d922c18 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -326,6 +326,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - System module now supports reading from jounrald. 
{pull}41061[41061] - Add support to include AWS cloudwatch linked accounts when using log_group_name_prefix to define log group names. {pull}41206[41206] - Improved Azure Blob Storage input documentation. {pull}41252[41252] +- Make ETW input GA. {pull}41389[41389] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-etw.asciidoc b/x-pack/filebeat/docs/inputs/input-etw.asciidoc index c072542cf5a6..dcfd4732c26d 100644 --- a/x-pack/filebeat/docs/inputs/input-etw.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-etw.asciidoc @@ -9,8 +9,6 @@ ETW ++++ -beta[] - https://learn.microsoft.com/en-us/windows/win32/etw/event-tracing-portal[Event Tracing for Windows] is a powerful logging and tracing mechanism built into the Windows operating system. It provides a detailed view of application and system diff --git a/x-pack/filebeat/input/etw/input.go b/x-pack/filebeat/input/etw/input.go index f030ada04e06..b41e7347a3eb 100644 --- a/x-pack/filebeat/input/etw/input.go +++ b/x-pack/filebeat/input/etw/input.go @@ -79,7 +79,7 @@ type etwInput struct { func Plugin() input.Plugin { return input.Plugin{ Name: inputName, - Stability: feature.Beta, + Stability: feature.Stable, Info: "Collect ETW logs.", Manager: stateless.NewInputManager(configure), } From 336615051e0b6eb30328342892d8de54fe656c1e Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Thu, 24 Oct 2024 08:48:03 -0300 Subject: [PATCH 79/90] bump kind to 0.24.0 (#41309) * update kind to 0.24.0 --- .buildkite/filebeat/filebeat-pipeline.yml | 2 +- .buildkite/metricbeat/pipeline.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index 2b58709b2138..6809042f901e 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -25,7 +25,7 @@ env: # Integration Tests K8S_VERSION: "v1.31.0" - ASDF_KIND_VERSION: "0.20.0" + ASDF_KIND_VERSION: "0.24.0" # Unit tests 
RACE_DETECTOR: "true" diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 1a9dab4a2f95..63c1870c1585 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -22,7 +22,7 @@ env: # Integration Tests K8S_VERSION: "v1.31.0" - ASDF_KIND_VERSION: "0.20.0" + ASDF_KIND_VERSION: "0.24.0" # Module Tests BEAT_PATH: "metricbeat" From 5941e684d05c5dfa70c43e8231b838ae5bfcd249 Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Thu, 24 Oct 2024 04:55:15 -0700 Subject: [PATCH 80/90] Lower logging levels in add_session_metadata processor (#41409) Reduce logging levels for some log messages in the add_session_metadata processor. If something goes wrong with the process cache where all, or most, processes are missed, many of these logs would be called for every process event, resulting in a lot of logging spam. These logs have been changed to Debug, which is below the default log level and will not cause log spam. The logs that have been reduce to Info are in a timer, so they will not cause a lot of spam, but they should be Informational messages There are better ways to detect if enrichment has failed, so changing the log levels shouldn't negatively affect anything. For example, an Elasticsearch query or alert on missing fields that only this processor will populate will show processes that weren't properly enriched (i.e. process.entry_leader fields). 
--- x-pack/auditbeat/processors/sessionmd/add_session_metadata.go | 4 ++-- x-pack/auditbeat/processors/sessionmd/processdb/db.go | 2 +- x-pack/auditbeat/processors/sessionmd/procfs/procfs.go | 2 +- .../kerneltracingprovider/kerneltracingprovider_linux.go | 4 ++-- .../sessionmd/provider/procfsprovider/procfsprovider.go | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go index a01c80643256..93fed7096b33 100644 --- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go +++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go @@ -174,7 +174,7 @@ func (p *addSessionMetadata) enrich(ev *beat.Event) (*beat.Event, error) { proc, err := p.provider.GetProcess(pid) if err != nil { e := fmt.Errorf("pid %v not found in db: %w", pid, err) - p.logger.Warnw("PID not found in provider", "pid", pid, "error", err) + p.logger.Debugw("PID not found in provider", "pid", pid, "error", err) return nil, e } fullProcess = *proc @@ -182,7 +182,7 @@ func (p *addSessionMetadata) enrich(ev *beat.Event) (*beat.Event, error) { fullProcess, err = p.db.GetProcess(pid) if err != nil { e := fmt.Errorf("pid %v not found in db: %w", pid, err) - p.logger.Warnw("PID not found in provider", "pid", pid, "error", err) + p.logger.Debugw("PID not found in provider", "pid", pid, "error", err) return nil, e } } diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/db.go b/x-pack/auditbeat/processors/sessionmd/processdb/db.go index e18c247a8590..1f97f7d0fd58 100644 --- a/x-pack/auditbeat/processors/sessionmd/processdb/db.go +++ b/x-pack/auditbeat/processors/sessionmd/processdb/db.go @@ -421,7 +421,7 @@ func (db *DB) InsertExit(exit types.ProcessExitEvent) { pid := exit.PIDs.Tgid process, ok := db.processes[pid] if !ok { - db.logger.Errorf("could not insert exit, pid %v not found in db", pid) + db.logger.Debugf("could not insert exit, pid %v not 
found in db", pid) return } process.ExitCode = exit.ExitCode diff --git a/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go b/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go index b76dfdfdb485..992e24858363 100644 --- a/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go +++ b/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go @@ -196,7 +196,7 @@ func (r ProcfsReader) GetAllProcesses() ([]ProcessInfo, error) { for _, proc := range procs { process_info, err := r.getProcessInfo(proc) if err != nil { - r.logger.Warnf("failed to read process info for %v", proc.PID) + r.logger.Debugf("failed to read process info for %v", proc.PID) } ret = append(ret, process_info) } diff --git a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go index 506d840b5efa..d3ec4ba7bd3d 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go @@ -211,7 +211,7 @@ func (p *prvdr) Sync(_ *beat.Event, pid uint32) error { func (p *prvdr) handleBackoff(now time.Time) { if p.inBackoff { if now.Sub(p.backoffStart) > backoffDuration { - p.logger.Warnw("ended backoff, skipped processes", "backoffSkipped", p.backoffSkipped) + p.logger.Infow("ended backoff, skipped processes", "backoffSkipped", p.backoffSkipped) p.inBackoff = false p.combinedWait = 0 * time.Millisecond } else { @@ -220,7 +220,7 @@ func (p *prvdr) handleBackoff(now time.Time) { } } else { if p.combinedWait > combinedWaitLimit { - p.logger.Warn("starting backoff") + p.logger.Info("starting backoff") p.inBackoff = true p.backoffStart = now p.backoffSkipped = 0 diff --git a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go 
b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go index e29e70a0549b..34c3166f26fa 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go @@ -68,7 +68,7 @@ func (p prvdr) Sync(ev *beat.Event, pid uint32) error { pe.Env = procInfo.Env pe.Filename = procInfo.Filename } else { - p.logger.Warnw("couldn't get process info from proc for pid", "pid", pid, "error", err) + p.logger.Debugw("couldn't get process info from proc for pid", "pid", pid, "error", err) // If process info couldn't be taken from procfs, populate with as much info as // possible from the event pe.PIDs.Tgid = pid From f8d3880d13194ae9aaafcfaddcda21c7845cdf14 Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Thu, 24 Oct 2024 06:41:03 -0700 Subject: [PATCH 81/90] Update go-quark to v0.2.0 (#41408) Update go-quark to v0.2.0. This version of go-quark includes a bugfix from libquark that fixes use of kprobes on newer Linux kernels. This also removes the license override for go-quark; the license in it should now be detected properly, removing the need for the override. --- NOTICE.txt | 208 +++++++++++++++++++++++++++++++- dev-tools/notice/overrides.json | 1 - go.mod | 2 +- go.sum | 4 +- 4 files changed, 207 insertions(+), 8 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index e6ebacd0ffe4..e473b3040b57 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -14747,14 +14747,214 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/go-quark -Version: v0.1.2 +Version: v0.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-quark@v0.1.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-quark@v0.2.0/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -Source code in this repository is licensed under the Apache License Version 2.0, -an Apache compatible license. + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- diff --git a/dev-tools/notice/overrides.json b/dev-tools/notice/overrides.json index a50cac02e0fb..bb82c97ebe40 100644 --- a/dev-tools/notice/overrides.json +++ b/dev-tools/notice/overrides.json @@ -19,4 +19,3 @@ {"name": "github.com/JohnCGriffin/overflow", "licenceType": "MIT"} {"name": "github.com/elastic/ebpfevents", "licenceType": "Apache-2.0"} {"name": "go.opentelemetry.io/collector/config/configopaque", "licenceType": "Apache-2.0"} -{"name": "github.com/elastic/go-quark", "licenceType": "Apache-2.0"} diff --git a/go.mod b/go.mod index 252fea019974..e4352b97565d 100644 --- a/go.mod +++ b/go.mod @@ -192,7 +192,7 @@ require ( github.com/elastic/elastic-agent-libs v0.12.1 github.com/elastic/elastic-agent-system-metrics v0.11.1 github.com/elastic/go-elasticsearch/v8 v8.14.0 - github.com/elastic/go-quark v0.1.2 + github.com/elastic/go-quark v0.2.0 github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 github.com/elastic/mito v1.15.0 github.com/elastic/tk-btf v0.1.0 diff --git a/go.sum b/go.sum index 3eadfda45096..a2b78a384f48 100644 --- a/go.sum +++ b/go.sum @@ -363,8 +363,8 @@ github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f h1:TsPpU5EAwlt github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f/go.mod h1:HHaWnZamYKWsR9/eZNHqRHob8iQDKnchHmmskT/SKko= 
github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 h1:q8n4QjcLa4q39Q3fqHRknTBXBtegjriHFrB42YKgXGI= github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595/go.mod h1:s09U1b4P1ZxnKx2OsqY7KlHdCesqZWIhyq0Gs/QC/Us= -github.com/elastic/go-quark v0.1.2 h1:Hnov9q8D9ofS976SODWWYAZ23IpgPILxTUCiccmhw0c= -github.com/elastic/go-quark v0.1.2/go.mod h1:/ngqgumD/Z5vnFZ4XPN2kCbxnEfG5/Uc+bRvOBabVVA= +github.com/elastic/go-quark v0.2.0 h1:r2BL4NzvhESrrL/yA3AcHt8mwF7fvQDssBAUiOL1sdg= +github.com/elastic/go-quark v0.2.0/go.mod h1:/ngqgumD/Z5vnFZ4XPN2kCbxnEfG5/Uc+bRvOBabVVA= github.com/elastic/go-seccomp-bpf v1.4.0 h1:6y3lYrEHrLH9QzUgOiK8WDqmPaMnnB785WxibCNIOH4= github.com/elastic/go-seccomp-bpf v1.4.0/go.mod h1:wIMxjTbKpWGQk4CV9WltlG6haB4brjSH/dvAohBPM1I= github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 h1:yuiN60oaQUz2PtNpNhDI2H6zrCdfiiptmNdwV5WUaKA= From d3876d7c444cbbdbb43e4e8f0e4eb72421a1b91b Mon Sep 17 00:00:00 2001 From: Olga Naydyonock Date: Thu, 24 Oct 2024 17:11:33 +0300 Subject: [PATCH 82/90] Added weekly macos pipeline (#41308) --- .buildkite/macos-tests-pipeline.yml | 27 +++++++++++++++++++ catalog-info.yaml | 41 +++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 .buildkite/macos-tests-pipeline.yml diff --git a/.buildkite/macos-tests-pipeline.yml b/.buildkite/macos-tests-pipeline.yml new file mode 100644 index 000000000000..94f3f00248a4 --- /dev/null +++ b/.buildkite/macos-tests-pipeline.yml @@ -0,0 +1,27 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +env: + IMAGE_MACOS_ARM: "generic-13-ventura-arm" + IMAGE_MACOS_X86_64: "generic-13-ventura-x64" + + # Other deps + ASDF_MAGE_VERSION: 1.15.0 + ASDF_NODEJS_VERSION: 18.17.1 + ASDF_PYTHON_VERSION: 3.10.9 + + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + +steps: + - label: ":mac: Auditbeat: macOS x86_64 Unit Tests" + command: echo "test!" 
+ retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" diff --git a/catalog-info.yaml b/catalog-info.yaml index 61a12a15cd66..d4a48a5a1aae 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -1149,6 +1149,40 @@ spec: everyone: access_level: BUILD_AND_READ +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: beats-macos-tests + description: 'Runs of Beats macOS tests' + links: + - title: Pipeline + url: https://buildkite.com/elastic/beats-macos-tests +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: beats-macos-tests + description: 'Runs of Beats macOS tests' + spec: + repository: elastic/beats + pipeline_file: ".buildkite/macos-tests-pipeline.yml" + cancel_intermediate_builds: false + provider_settings: + trigger_mode: none + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + release-eng: + access_level: BUILD_AND_READ + everyone: + access_level: BUILD_AND_READ + --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -1186,6 +1220,12 @@ spec: message: Daily trigger of Beats AWS tests env: PIPELINES_TO_TRIGGER: 'beats-aws-tests' + Weekly run of macOS tests: + branch: main + cronline: 0 0 * * 0 + message: Weekly trigger of Beats macOS tests + env: + PIPELINES_TO_TRIGGER: 'beats-macos-tests' skip_intermediate_builds: true provider_settings: trigger_mode: none @@ -1200,3 +1240,4 @@ spec: access_level: BUILD_AND_READ everyone: access_level: BUILD_AND_READ 
+ From 3a96d4bcc563b629bd2358e91a94ec7b9238029a Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 22:15:42 +0200 Subject: [PATCH 83/90] [DOCS] Fix typo in changeling (#41314) --- CHANGELOG.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 6b5731b98d39..5c273f7bfb1b 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -480,7 +480,7 @@ https://github.com/elastic/beats/compare/v8.13.2\...v8.13.3[View commits] *Affecting all Beats* -- Update Go version to 1.21.9. {pulk}38727[38727] +- Update Go version to 1.21.9. {pull}38727[38727] - The environment variable `BEATS_ADD_CLOUD_METADATA_PROVIDERS` overrides configured/default `add_cloud_metadata` providers. {pull}38669[38669] *Auditbeat* From a20fbdfaaf1d7156fd8033408651e2d42ab69807 Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Fri, 25 Oct 2024 07:06:51 +1030 Subject: [PATCH 84/90] x-pack/filebeat/input/entityanalytics/provider/internal/jamf: remove incorrect comments (#41411) --- .../input/entityanalytics/provider/jamf/internal/jamf/jamf.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf.go b/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf.go index d30562ca567c..98f98499882f 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf.go +++ b/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf.go @@ -205,9 +205,6 @@ func GetComputers(ctx context.Context, cli *http.Client, tenant string, tok Toke // GetUsers returns Jamf users using the list users API endpoint. tenant is the // Jamf user domain and key is the API token to use for the query. If user is not empty, // details for the specific user are returned, otherwise a list of all users is returned. 
-// The query parameter holds queries as described in https://developer.Jamf.com/docs/reference/user-query/ -// with the query syntax described at https://developer.Jamf.com/docs/reference/core-Jamf-api/#filter. -// Parts of the response may be omitted using the omit parameter. // // See https://developer.jamf.com/jamf-pro/reference/findusers for details. func GetUsers(ctx context.Context, cli *http.Client, tenant string, tok Token, query url.Values) ([]User, error) { From cac52436cc4245eb0a8ae184c1086e38ac1edbc4 Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Fri, 25 Oct 2024 07:07:11 +1030 Subject: [PATCH 85/90] x-pack/filebeat/docs/input: clarify interaction between split and cursor updates (#41384) --- x-pack/filebeat/docs/inputs/input-httpjson.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc index 21766a515a8e..a7dd5d7634fa 100644 --- a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc @@ -783,6 +783,8 @@ Valid when used with `type: map`. When not empty, defines a new field where the If set to true, empty or missing value will be ignored and processing will pass on to the next nested split operation instead of failing with an error. Default: `false`. +Note that if `ignore_empty_value` is `true` and the final result is empty, no event will be published, and no cursor update will be made. If a cursor update must be made for all responses, this should be set to `false` and the ingest pipeline must be configured to tolerate empty event sets. + [float] ==== `response.split[].split` @@ -1556,6 +1558,8 @@ See <> . Cursor is a list of key value objects where arbitrary values are defined. The values are interpreted as <> and a default template can be set. Cursor state is kept between input restarts and updated once all the events for a request are published. 
+If no event is published, no cursor update is made. This can have implications on how cursor updates should be performed when the target API returns empty response sets. + Each cursor entry is formed by: - A `value` template, which will define the value to store when evaluated. From efbc4ff65c231a9c9c7256c3f41f94bae1989991 Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Fri, 25 Oct 2024 00:14:40 -0700 Subject: [PATCH 86/90] Add Integration tests to Auditbeat CI pipeline (#37892) Refactor the CI integration test steps for the Auditbeat pipeline to use build matrices for Linux tests. By doing this, it will be easier to add additional VM types to the integration tests in the future. With this, the integration tests will run on Ubuntu 20.04, 22.04, 24.04 for both x86_64 and arm64, and RHEL 9 x86_64. --- .buildkite/auditbeat/auditbeat-pipeline.yml | 38 +++++++++++++++------ 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index e083df17749b..b65044e73445 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -3,7 +3,6 @@ name: "beats-auditbeat" env: AWS_ARM_INSTANCE_TYPE: "m6g.xlarge" - AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16" @@ -12,7 +11,12 @@ env: IMAGE_MACOS_ARM: "generic-13-ventura-arm" IMAGE_MACOS_X86_64: "generic-13-ventura-x64" IMAGE_RHEL9: "family/platform-ingest-beats-rhel-9" - IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" + IMAGE_UBUNTU_2004_X86_64: "family/platform-ingest-beats-ubuntu-2004" + IMAGE_UBUNTU_2004_ARM64: "platform-ingest-beats-ubuntu-2004-aarch64" + IMAGE_UBUNTU_2204_X86_64: "family/platform-ingest-beats-ubuntu-2204" + IMAGE_UBUNTU_2204_ARM64: "platform-ingest-beats-ubuntu-2204-aarch64" + IMAGE_UBUNTU_2404_X86_64: "family/platform-ingest-beats-ubuntu-2404" + 
IMAGE_UBUNTU_2404_ARM64: "platform-ingest-beats-ubuntu-2404-aarch64" IMAGE_WIN_10: "family/platform-ingest-beats-windows-10" IMAGE_WIN_11: "family/platform-ingest-beats-windows-11" IMAGE_WIN_2016: "family/platform-ingest-beats-windows-2016" @@ -81,7 +85,7 @@ steps: - limit: 1 agents: provider: "gcp" - image: "${IMAGE_UBUNTU_X86_64}" + image: "${IMAGE_UBUNTU_2204_X86_64}" machineType: "${GCP_DEFAULT_MACHINE_TYPE}" artifact_paths: - "auditbeat/build/*.xml" @@ -181,7 +185,7 @@ steps: - limit: 1 agents: provider: "gcp" - image: "${IMAGE_UBUNTU_X86_64}" + image: "${IMAGE_UBUNTU_2204_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: @@ -192,7 +196,7 @@ steps: if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*(macOS|arm|integrations).*/ steps: - - label: ":ubuntu: Auditbeat: Ubuntu x86_64 Integration Tests" + - label: ":ubuntu: Auditbeat: Ubuntu x86_64 Integration Tests -- {{matrix.image}}" key: "auditbeat-extended-integ-tests" if: build.env("GITHUB_PR_LABELS") =~ /.*integrations.*/ command: | @@ -204,7 +208,7 @@ steps: - limit: 1 agents: provider: "gcp" - image: "${IMAGE_UBUNTU_X86_64}" + image: "{{matrix.image}}" machineType: "${GCP_DEFAULT_MACHINE_TYPE}" artifact_paths: - "auditbeat/build/*.xml" @@ -218,8 +222,14 @@ steps: notify: - github_commit_status: context: "auditbeat: Ubuntu x86_64 Integration Tests" + matrix: + setup: + image: + - "${IMAGE_UBUNTU_2004_X86_64}" + - "${IMAGE_UBUNTU_2204_X86_64}" + - "${IMAGE_UBUNTU_2404_X86_64}" - - label: ":ubuntu: Auditbeat: Ubuntu arm64 Integration Tests" + - label: ":ubuntu: Auditbeat: Ubuntu arm64 Integration Tests -- {{matrix.image}}" key: "auditbeat-extended-arm64-integ-tests" if: build.env("GITHUB_PR_LABELS") =~ /.*integrations.*/ command: | @@ -231,7 +241,7 @@ steps: - limit: 1 agents: provider: "aws" - imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" + imagePrefix: "{{matrix.image}}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" artifact_paths: - 
"auditbeat/build/*.xml" @@ -245,6 +255,12 @@ steps: notify: - github_commit_status: context: "auditbeat: Ubuntu arm64 Integration Tests" + matrix: + setup: + image: + - "${IMAGE_UBUNTU_2004_ARM64}" + - "${IMAGE_UBUNTU_2204_ARM64}" + - "${IMAGE_UBUNTU_2404_ARM64}" - label: ":ubuntu: Auditbeat: Ubuntu arm64 Unit Tests" key: "auditbeat-extended-arm64-unit-tests" @@ -258,7 +274,7 @@ steps: - limit: 1 agents: provider: "aws" - imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" + imagePrefix: "${IMAGE_UBUNTU_2204_ARM64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" artifact_paths: - "auditbeat/build/*.xml" @@ -436,7 +452,7 @@ steps: timeout_in_minutes: 20 agents: provider: gcp - image: "${IMAGE_UBUNTU_X86_64}" + image: "${IMAGE_UBUNTU_2204_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: @@ -457,7 +473,7 @@ steps: timeout_in_minutes: 20 agents: provider: "aws" - imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" + imagePrefix: "${IMAGE_UBUNTU_2204_ARM64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" notify: - github_commit_status: From 4dfef8b290af22fb2d8f96b65d2e416614441a2a Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Fri, 25 Oct 2024 10:50:30 -0400 Subject: [PATCH 87/90] Add test for elasticsearch re-connection after network error & allow graceful shutdown (#40794) This commit reworks the `eslegclient.Connection` to accept a context in its `Connect` method, this allows the caller to cancel any in flight requests made by the connection by cancelling the context. The libbeat `outputs.Connectable` interface (used by `outputs.NetworkClient`) had to be updated to accept the context, which required refactoring in most of the outputs to also accept a context on connect. The worker from libbeat/publisher/pipeline/client_worker.go now uses a context for it's cancellation instead of a channel, this context is also used when creating a connection to Elasticsearch. An integration test is added to ensure the ES output can always recover from network errors. 
--- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 53 +++++++ filebeat/beater/filebeat.go | 37 +++-- filebeat/fileset/modules_integration_test.go | 5 +- filebeat/fileset/pipelines_test.go | 5 +- go.mod | 2 + go.sum | 4 + heartbeat/beater/heartbeat.go | 8 +- heartbeat/beater/heartbeat_test.go | 3 +- .../wrappers/monitorstate/testutil.go | 5 +- libbeat/cmd/instance/beat.go | 4 +- libbeat/esleg/eslegclient/api_mock_test.go | 13 +- libbeat/esleg/eslegclient/api_test.go | 12 +- .../esleg/eslegclient/bulkapi_mock_test.go | 6 +- libbeat/esleg/eslegclient/connection.go | 29 ++-- .../connection_integration_test.go | 35 ++--- libbeat/esleg/eslegclient/connection_test.go | 4 +- libbeat/esleg/eslegtest/util.go | 8 +- .../client_handler_integration_test.go | 5 +- libbeat/licenser/elastic_fetcher.go | 24 +-- .../elastic_fetcher_integration_test.go | 15 +- libbeat/licenser/elastic_fetcher_test.go | 47 +++++- .../monitoring/report/elasticsearch/client.go | 4 +- .../report/elasticsearch/elasticsearch.go | 5 +- libbeat/outputs/backoff.go | 4 +- libbeat/outputs/elasticsearch/client.go | 6 +- .../elasticsearch/client_integration_test.go | 8 +- .../elasticsearch/client_proxy_test.go | 5 +- libbeat/outputs/elasticsearch/client_test.go | 8 +- libbeat/outputs/failover.go | 4 +- libbeat/outputs/logstash/async.go | 4 +- libbeat/outputs/logstash/async_test.go | 4 +- .../logstash/logstash_integration_test.go | 9 +- libbeat/outputs/logstash/logstash_test.go | 8 +- libbeat/outputs/logstash/sync.go | 4 +- libbeat/outputs/logstash/sync_test.go | 4 +- libbeat/outputs/outputs.go | 2 +- libbeat/outputs/redis/backoff.go | 2 +- .../outputs/redis/redis_integration_test.go | 4 +- libbeat/publisher/pipeline/client_worker.go | 32 ++-- libbeat/publisher/pipeline/testing.go | 2 +- libbeat/template/load_integration_test.go | 13 +- .../tests/integration/elasticsearch_test.go | 148 ++++++++++++++++++ packetbeat/beater/packetbeat.go | 5 +- winlogbeat/beater/winlogbeat.go | 4 +- x-pack/winlogbeat/module/testing.go | 
5 +- .../winlogbeat/module/wintest/docker_test.go | 4 +- .../module/wintest/simulate_test.go | 5 +- 48 files changed, 481 insertions(+), 147 deletions(-) create mode 100644 libbeat/tests/integration/elasticsearch_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1c497d922c18..c1084282c1b3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -110,6 +110,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}[37816][37816] - Set timeout of 1 minute for FQDN requests {pull}37756[37756] - Fix issue where old data could be saved in the memory queue after acknowledgment, increasing memory use {pull}41356[41356] +- Ensure Elasticsearch output can always recover from network errors {pull}40794[40794] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index e473b3040b57..dcd146fe27a1 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -16257,6 +16257,29 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.15.0/LI limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/mock-es +Version: v0.0.0-20240712014503-e5b47ece0015 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/mock-es@v0.0.0-20240712014503-e5b47ece0015/LICENSE: + +Copyright 2024 Elasticsearch B.V. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/elastic/tk-btf Version: v0.1.0 @@ -48611,6 +48634,36 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/mileusna/useragent +Version: v1.3.4 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mileusna/useragent@v1.3.4/LICENSE.md: + +MIT License + +Copyright (c) 2017 Miloš Mileusnić + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + -------------------------------------------------------------------------------- Dependency : github.com/minio/asm2plan9s Version: v0.0.0-20200509001527-cdd76441f9d8 diff --git a/filebeat/beater/filebeat.go b/filebeat/beater/filebeat.go index 9d9cb220d4eb..815b6fabfde2 100644 --- a/filebeat/beater/filebeat.go +++ b/filebeat/beater/filebeat.go @@ -18,6 +18,7 @@ package beater import ( + "context" "flag" "fmt" "path/filepath" @@ -195,14 +196,16 @@ func (fb *Filebeat) setupPipelineLoaderCallback(b *beat.Beat) error { overwritePipelines := true b.OverwritePipelinesCallback = func(esConfig *conf.C) error { - esClient, err := eslegclient.NewConnectedClient(esConfig, "Filebeat") + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + esClient, err := eslegclient.NewConnectedClient(ctx, esConfig, "Filebeat") if err != nil { return err } // When running the subcommand setup, configuration from modules.d directories // have to be loaded using cfg.Reloader. Otherwise those configurations are skipped. 
- pipelineLoaderFactory := newPipelineLoaderFactory(b.Config.Output.Config()) + pipelineLoaderFactory := newPipelineLoaderFactory(ctx, b.Config.Output.Config()) enableAllFilesets, _ := b.BeatConfig.Bool("config.modules.enable_all_filesets", -1) forceEnableModuleFilesets, _ := b.BeatConfig.Bool("config.modules.force_enable_module_filesets", -1) filesetOverrides := fileset.FilesetOverrides{ @@ -322,14 +325,6 @@ func (fb *Filebeat) Run(b *beat.Beat) error { outDone := make(chan struct{}) // outDone closes down all active pipeline connections pipelineConnector := channel.NewOutletFactory(outDone).Create - // Create a ES connection factory for dynamic modules pipeline loading - var pipelineLoaderFactory fileset.PipelineLoaderFactory - if b.Config.Output.Name() == "elasticsearch" { - pipelineLoaderFactory = newPipelineLoaderFactory(b.Config.Output.Config()) - } else { - logp.Warn(pipelinesWarning) - } - inputsLogger := logp.NewLogger("input") v2Inputs := fb.pluginFactory(b.Info, inputsLogger, stateStore) v2InputLoader, err := v2.NewLoader(inputsLogger, v2Inputs, "type", cfg.DefaultType) @@ -350,8 +345,22 @@ func (fb *Filebeat) Run(b *beat.Beat) error { compat.RunnerFactory(inputsLogger, b.Info, v2InputLoader), input.NewRunnerFactory(pipelineConnector, registrar, fb.done), )) - moduleLoader := fileset.NewFactory(inputLoader, b.Info, pipelineLoaderFactory, config.OverwritePipelines) + // Create a ES connection factory for dynamic modules pipeline loading + var pipelineLoaderFactory fileset.PipelineLoaderFactory + // The pipelineFactory needs a context to control the connections to ES, + // when the pipelineFactory/ESClient are not needed any more the context + // must be cancelled. This pipeline factory will be used by the moduleLoader + // that is run by a crawler, whenever this crawler is stopped we also cancel + // the context. 
+ pipelineFactoryCtx, cancelPipelineFactoryCtx := context.WithCancel(context.Background()) + defer cancelPipelineFactoryCtx() + if b.Config.Output.Name() == "elasticsearch" { + pipelineLoaderFactory = newPipelineLoaderFactory(pipelineFactoryCtx, b.Config.Output.Config()) + } else { + logp.Warn(pipelinesWarning) + } + moduleLoader := fileset.NewFactory(inputLoader, b.Info, pipelineLoaderFactory, config.OverwritePipelines) crawler, err := newCrawler(inputLoader, moduleLoader, config.Inputs, fb.done, *once) if err != nil { logp.Err("Could not init crawler: %v", err) @@ -389,6 +398,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { err = crawler.Start(fb.pipeline, config.ConfigInput, config.ConfigModules) if err != nil { crawler.Stop() + cancelPipelineFactoryCtx() return fmt.Errorf("Failed to start crawler: %w", err) } @@ -444,6 +454,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { modules.Stop() adiscover.Stop() crawler.Stop() + cancelPipelineFactoryCtx() timeout := fb.config.ShutdownTimeout // Checks if on shutdown it should wait for all events to be published @@ -487,9 +498,9 @@ func (fb *Filebeat) Stop() { } // Create a new pipeline loader (es client) factory -func newPipelineLoaderFactory(esConfig *conf.C) fileset.PipelineLoaderFactory { +func newPipelineLoaderFactory(ctx context.Context, esConfig *conf.C) fileset.PipelineLoaderFactory { pipelineLoaderFactory := func() (fileset.PipelineLoader, error) { - esClient, err := eslegclient.NewConnectedClient(esConfig, "Filebeat") + esClient, err := eslegclient.NewConnectedClient(ctx, esConfig, "Filebeat") if err != nil { return nil, fmt.Errorf("Error creating Elasticsearch client: %w", err) } diff --git a/filebeat/fileset/modules_integration_test.go b/filebeat/fileset/modules_integration_test.go index 0d5ad2172c06..ffb149e53b3c 100644 --- a/filebeat/fileset/modules_integration_test.go +++ b/filebeat/fileset/modules_integration_test.go @@ -20,6 +20,7 @@ package fileset import ( + "context" "encoding/json" 
"path/filepath" "testing" @@ -268,7 +269,9 @@ func getTestingElasticsearch(t eslegtest.TestLogger) *eslegclient.Connection { conn.Encoder = eslegclient.NewJSONEncoder(nil, false) - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = conn.Connect(ctx) if err != nil { t.Fatal(err) panic(err) // panic in case TestLogger did not stop test diff --git a/filebeat/fileset/pipelines_test.go b/filebeat/fileset/pipelines_test.go index a358b0da9be6..ac6aa5035de7 100644 --- a/filebeat/fileset/pipelines_test.go +++ b/filebeat/fileset/pipelines_test.go @@ -20,6 +20,7 @@ package fileset import ( + "context" "net/http" "net/http/httptest" "testing" @@ -101,7 +102,9 @@ func TestLoadPipelinesWithMultiPipelineFileset(t *testing.T) { }) require.NoError(t, err) - err = testESClient.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = testESClient.Connect(ctx) require.NoError(t, err) err = testRegistry.LoadPipelines(testESClient, false) diff --git a/go.mod b/go.mod index e4352b97565d..fe7f8ea884d2 100644 --- a/go.mod +++ b/go.mod @@ -195,6 +195,7 @@ require ( github.com/elastic/go-quark v0.2.0 github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 github.com/elastic/mito v1.15.0 + github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 github.com/elastic/tk-btf v0.1.0 github.com/elastic/toutoumomoma v0.0.0-20240626215117-76e39db18dfb github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 @@ -340,6 +341,7 @@ require ( github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mileusna/useragent v1.3.4 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect diff --git a/go.sum b/go.sum index a2b78a384f48..124206af0401 100644 
--- a/go.sum +++ b/go.sum @@ -383,6 +383,8 @@ github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/u github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/mito v1.15.0 h1:MicOxLSVkgU2Aonbh3i+++66Wl5wvD8y9gALK8PQDYs= github.com/elastic/mito v1.15.0/go.mod h1:J+wCf4HccW2YoSFmZMGu+d06gN+WmnIlj5ehBqine74= +github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 h1:z8cC8GASpPo8yKlbnXI36HQ/BM9wYjhBPNbDjAWm0VU= +github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015/go.mod h1:qH9DX/Dmflz6EAtaks/+2SsdQzecVAKE174Zl66hk7E= github.com/elastic/pkcs8 v1.0.0 h1:HhitlUKxhN288kcNcYkjW6/ouvuwJWd9ioxpjnD9jVA= github.com/elastic/pkcs8 v1.0.0/go.mod h1:ipsZToJfq1MxclVTwpG7U/bgeDtf+0HkUiOxebk95+0= github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 h1:FzA0/n4iMt8ojGDGRoiFPSHFvvdVIvxOxyLtiFnrLBM= @@ -703,6 +705,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/mileusna/useragent v1.3.4 h1:MiuRRuvGjEie1+yZHO88UBYg8YBC/ddF6T7F56i3PCk= +github.com/mileusna/useragent v1.3.4/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= diff --git a/heartbeat/beater/heartbeat.go b/heartbeat/beater/heartbeat.go index 9a849f6bc7e7..227b375ee900 100644 --- a/heartbeat/beater/heartbeat.go +++ b/heartbeat/beater/heartbeat.go @@ -88,7 +88,7 @@ func New(b *beat.Beat, rawConfig 
*conf.C) (beat.Beater, error) { if b.Config.Output.Name() == "elasticsearch" && !b.Manager.Enabled() { // Connect to ES and setup the State loader if the output is not managed by agent // Note this, intentionally, blocks until connected or max attempts reached - esClient, err := makeESClient(b.Config.Output.Config(), 3, 2*time.Second) + esClient, err := makeESClient(context.TODO(), b.Config.Output.Config(), 3, 2*time.Second) if err != nil { if parsedConfig.RunOnce { trace.Abort() @@ -275,7 +275,7 @@ func (bt *Heartbeat) RunCentralMgmtMonitors(b *beat.Beat) { } // Backoff panics with 0 duration, set to smallest unit - esClient, err := makeESClient(outCfg.Config(), 1, 1*time.Nanosecond) + esClient, err := makeESClient(context.TODO(), outCfg.Config(), 1, 1*time.Nanosecond) if err != nil { logp.L().Warnf("skipping monitor state management during managed reload: %w", err) } else { @@ -324,7 +324,7 @@ func (bt *Heartbeat) Stop() { } // makeESClient establishes an ES connection meant to load monitors' state -func makeESClient(cfg *conf.C, attempts int, wait time.Duration) (*eslegclient.Connection, error) { +func makeESClient(ctx context.Context, cfg *conf.C, attempts int, wait time.Duration) (*eslegclient.Connection, error) { var ( esClient *eslegclient.Connection err error @@ -353,7 +353,7 @@ func makeESClient(cfg *conf.C, attempts int, wait time.Duration) (*eslegclient.C } for i := 0; i < attempts; i++ { - esClient, err = eslegclient.NewConnectedClient(newCfg, "Heartbeat") + esClient, err = eslegclient.NewConnectedClient(ctx, newCfg, "Heartbeat") if err == nil { connectDelay.Reset() return esClient, nil diff --git a/heartbeat/beater/heartbeat_test.go b/heartbeat/beater/heartbeat_test.go index 669811dc4c82..279366a0e7ee 100644 --- a/heartbeat/beater/heartbeat_test.go +++ b/heartbeat/beater/heartbeat_test.go @@ -18,6 +18,7 @@ package beater import ( + "context" "testing" "time" @@ -39,7 +40,7 @@ func TestMakeESClient(t *testing.T) { anyAttempt := 1 anyDuration := 1 * 
time.Second - _, _ = makeESClient(origCfg, anyAttempt, anyDuration) + _, _ = makeESClient(context.Background(), origCfg, anyAttempt, anyDuration) timeout, err := origCfg.Int("timeout", -1) require.NoError(t, err) diff --git a/heartbeat/monitors/wrappers/monitorstate/testutil.go b/heartbeat/monitors/wrappers/monitorstate/testutil.go index 28a6c2606557..be58dcdb924b 100644 --- a/heartbeat/monitors/wrappers/monitorstate/testutil.go +++ b/heartbeat/monitors/wrappers/monitorstate/testutil.go @@ -18,6 +18,7 @@ package monitorstate import ( + "context" "encoding/json" "testing" @@ -50,7 +51,9 @@ func IntegES(t *testing.T) (esc *eslegclient.Connection) { conn.Encoder = eslegclient.NewJSONEncoder(nil, false) - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = conn.Connect(ctx) if err != nil { t.Fatal(err) panic(err) // panic in case TestLogger did not stop test diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 23efa03b4897..6332ebac39b5 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -898,7 +898,9 @@ func (b *Beat) Setup(settings Settings, bt beat.Creator, setup SetupSettings) er if !isElasticsearchOutput(outCfg.Name()) { return fmt.Errorf("index management requested but the Elasticsearch output is not configured/enabled") } - esClient, err := eslegclient.NewConnectedClient(outCfg.Config(), b.Info.Beat) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + esClient, err := eslegclient.NewConnectedClient(ctx, outCfg.Config(), b.Info.Beat) if err != nil { return err } diff --git a/libbeat/esleg/eslegclient/api_mock_test.go b/libbeat/esleg/eslegclient/api_mock_test.go index 97834dcda51c..231ee4378008 100644 --- a/libbeat/esleg/eslegclient/api_mock_test.go +++ b/libbeat/esleg/eslegclient/api_mock_test.go @@ -20,6 +20,7 @@ package eslegclient import ( + "context" "encoding/json" "fmt" "net/http" @@ -63,14 +64,14 @@ func 
TestOneHostSuccessResp(t *testing.T) { server := ElasticsearchMock(200, expectedResp) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", } _, resp, err := client.Index(index, "test", "1", params, body) if err != nil { - t.Errorf("Index() returns error: %s", err) + t.Fatalf("Index() returns error: %s", err) } if !resp.Created { t.Errorf("Index() fails: %s", resp) @@ -89,8 +90,10 @@ func TestOneHost500Resp(t *testing.T) { server := ElasticsearchMock(http.StatusInternalServerError, []byte("Something wrong happened")) - client := newTestConnection(server.URL) - err := client.Connect() + client := newTestConnection(t, server.URL) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err := client.Connect(ctx) if err != nil { t.Fatalf("Failed to connect: %v", err) } @@ -121,7 +124,7 @@ func TestOneHost503Resp(t *testing.T) { server := ElasticsearchMock(503, []byte("Something wrong happened")) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", diff --git a/libbeat/esleg/eslegclient/api_test.go b/libbeat/esleg/eslegclient/api_test.go index 6c7dd675ccf9..0bd0f5341b53 100644 --- a/libbeat/esleg/eslegclient/api_test.go +++ b/libbeat/esleg/eslegclient/api_test.go @@ -19,6 +19,7 @@ package eslegclient import ( + "context" "encoding/json" "testing" @@ -170,11 +171,20 @@ func TestReadSearchResult_invalid(t *testing.T) { assert.Error(t, err) } -func newTestConnection(url string) *Connection { +// newTestConnection creates a new connection for testing +// +//nolint:unused // it's used by files with the !integration constraint +func newTestConnection(t *testing.T, url string) *Connection { conn, _ := NewConnection(ConnectionSettings{ URL: url, }) conn.Encoder = NewJSONEncoder(nil, false) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := 
conn.Connect(ctx); err != nil { + t.Fatalf("cannot connect to Elasticsearch: %s", err) + } + return conn } diff --git a/libbeat/esleg/eslegclient/bulkapi_mock_test.go b/libbeat/esleg/eslegclient/bulkapi_mock_test.go index 96434819ecac..598204386f95 100644 --- a/libbeat/esleg/eslegclient/bulkapi_mock_test.go +++ b/libbeat/esleg/eslegclient/bulkapi_mock_test.go @@ -60,7 +60,7 @@ func TestOneHostSuccessResp_Bulk(t *testing.T) { server := ElasticsearchMock(200, expectedResp) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", @@ -95,7 +95,7 @@ func TestOneHost500Resp_Bulk(t *testing.T) { server := ElasticsearchMock(http.StatusInternalServerError, []byte("Something wrong happened")) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", @@ -134,7 +134,7 @@ func TestOneHost503Resp_Bulk(t *testing.T) { server := ElasticsearchMock(503, []byte("Something wrong happened")) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", diff --git a/libbeat/esleg/eslegclient/connection.go b/libbeat/esleg/eslegclient/connection.go index 6a22132080f9..310aa853e340 100644 --- a/libbeat/esleg/eslegclient/connection.go +++ b/libbeat/esleg/eslegclient/connection.go @@ -67,7 +67,6 @@ type Connection struct { // requests will share the same cancellable context // so they can be aborted on Close() reqsContext context.Context - cancelReqs func() } // ConnectionSettings are the settings needed for a Connection @@ -82,7 +81,7 @@ type ConnectionSettings struct { Kerberos *kerberos.Config - OnConnectCallback func() error + OnConnectCallback func(*Connection) error Observer transport.IOStatser Parameters map[string]string @@ -109,7 +108,7 @@ type ESVersionData struct { BuildFlavor string `json:"build_flavor"` } -// NewConnection returns a new Elasticsearch 
client +// NewConnection returns a new Elasticsearch client. func NewConnection(s ConnectionSettings) (*Connection, error) { logger := logp.NewLogger("esclientleg") @@ -184,15 +183,12 @@ func NewConnection(s ConnectionSettings) (*Connection, error) { logger.Info("kerberos client created") } - ctx, cancelFunc := context.WithCancel(context.Background()) conn := Connection{ ConnectionSettings: s, HTTP: esClient, Encoder: encoder, log: logger, responseBuffer: bytes.NewBuffer(nil), - reqsContext: ctx, - cancelReqs: cancelFunc, } if s.APIKey != "" { @@ -255,7 +251,7 @@ func NewClients(cfg *cfg.C, beatname string) ([]Connection, error) { } // NewConnectedClient returns a non-thread-safe connection. Make sure for each goroutine you initialize a new connection. -func NewConnectedClient(cfg *cfg.C, beatname string) (*Connection, error) { +func NewConnectedClient(ctx context.Context, cfg *cfg.C, beatname string) (*Connection, error) { clients, err := NewClients(cfg, beatname) if err != nil { return nil, err @@ -264,7 +260,7 @@ func NewConnectedClient(cfg *cfg.C, beatname string) (*Connection, error) { errors := []string{} for _, client := range clients { - err = client.Connect() + err = client.Connect(ctx) if err != nil { const errMsg = "error connecting to Elasticsearch at %v: %v" client.log.Errorf(errMsg, client.URL, err) @@ -279,17 +275,22 @@ func NewConnectedClient(cfg *cfg.C, beatname string) (*Connection, error) { // Connect connects the client. It runs a GET request against the root URL of // the configured host, updates the known Elasticsearch version and calls -// globally configured handlers. -func (conn *Connection) Connect() error { +// globally configured handlers. The context is used to control the lifecycle +// of the HTTP requests/connections, the caller is responsible for cancelling +// the context to stop any in-flight requests. 
+func (conn *Connection) Connect(ctx context.Context) error { if conn.log == nil { conn.log = logp.NewLogger("esclientleg") } + + conn.reqsContext = ctx + if err := conn.getVersion(); err != nil { return err } if conn.OnConnectCallback != nil { - if err := conn.OnConnectCallback(); err != nil { + if err := conn.OnConnectCallback(conn); err != nil { return fmt.Errorf("Connection marked as failed because the onConnect callback failed: %w", err) } } @@ -323,7 +324,7 @@ func (conn *Connection) Ping() (ESPingData, error) { return response, nil } -// Close closes a connection. +// Close closes any idle connections from the HTTP client. func (conn *Connection) Close() error { conn.HTTP.CloseIdleConnections() return nil @@ -358,7 +359,9 @@ func (conn *Connection) Test(d testing.Driver) { }) } - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = conn.Connect(ctx) d.Fatal("talk to server", err) version := conn.GetVersion() d.Info("version", version.String()) diff --git a/libbeat/esleg/eslegclient/connection_integration_test.go b/libbeat/esleg/eslegclient/connection_integration_test.go index b4e277ed1a6b..b56360b42321 100644 --- a/libbeat/esleg/eslegclient/connection_integration_test.go +++ b/libbeat/esleg/eslegclient/connection_integration_test.go @@ -21,8 +21,7 @@ package eslegclient import ( "context" - "io/ioutil" - "math/rand" + "io" "net" "net/http" "net/http/httptest" @@ -34,17 +33,25 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/esleg/eslegtest" - "github.com/elastic/beats/v7/libbeat/outputs" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/transport/httpcommon" ) func TestConnect(t *testing.T) { conn := getTestingElasticsearch(t) - err := conn.Connect() + err := conn.Connect(context.Background()) assert.NoError(t, err) } +func TestConnectionCanBeClosedAndReused(t *testing.T) { + conn := getTestingElasticsearch(t) + ctx, cancel := 
context.WithCancel(context.Background()) + assert.NoError(t, conn.Connect(ctx), "first connect must succeed") + assert.NoError(t, conn.Close(), "close must succeed") + cancel() + assert.NoError(t, conn.Connect(context.Background()), "calling connect after close must succeed") +} + func TestConnectWithProxy(t *testing.T) { wrongPort, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) @@ -66,7 +73,9 @@ func TestConnectWithProxy(t *testing.T) { "timeout": 5, // seconds }) require.NoError(t, err) - assert.Error(t, client.Connect(), "it should fail without proxy") + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + assert.Error(t, client.Connect(ctx), "it should fail without proxy") client, err = connectTestEs(t, map[string]interface{}{ "hosts": "http://" + wrongPort.Addr().String(), @@ -74,7 +83,7 @@ func TestConnectWithProxy(t *testing.T) { "timeout": 5, // seconds }) require.NoError(t, err) - assert.NoError(t, client.Connect()) + assert.NoError(t, client.Connect(ctx)) } func connectTestEs(t *testing.T, cfg interface{}) (*Connection, error) { @@ -139,16 +148,6 @@ func getTestingElasticsearch(t eslegtest.TestLogger) *Connection { return conn } -func randomClient(grp outputs.Group) outputs.NetworkClient { - L := len(grp.Clients) - if L == 0 { - panic("no elasticsearch client") - } - - client := grp.Clients[rand.Intn(L)] - return client.(outputs.NetworkClient) -} - // startTestProxy starts a proxy that redirects all connections to the specified URL func startTestProxy(t *testing.T, redirectURL string) *httptest.Server { t.Helper() @@ -166,14 +165,14 @@ func startTestProxy(t *testing.T, redirectURL string) *httptest.Server { require.NoError(t, err) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) for _, header := range []string{"Content-Encoding", "Content-Type"} { w.Header().Set(header, resp.Header.Get(header)) } w.WriteHeader(resp.StatusCode) - 
w.Write(body) + w.Write(body) //nolint: errcheck // It's a test, we can ignore this error })) return proxy } diff --git a/libbeat/esleg/eslegclient/connection_test.go b/libbeat/esleg/eslegclient/connection_test.go index 19fe67e9f55b..77cbcdda674f 100644 --- a/libbeat/esleg/eslegclient/connection_test.go +++ b/libbeat/esleg/eslegclient/connection_test.go @@ -162,7 +162,9 @@ func TestUserAgentHeader(t *testing.T) { testCase.connSettings.URL = server.URL conn, err := NewConnection(testCase.connSettings) require.NoError(t, err) - require.NoError(t, conn.Connect(), "conn.Connect must not return an error") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, conn.Connect(ctx), "conn.Connect must not return an error") }) } } diff --git a/libbeat/esleg/eslegtest/util.go b/libbeat/esleg/eslegtest/util.go index 28f33fde2dcb..e86ca14363d8 100644 --- a/libbeat/esleg/eslegtest/util.go +++ b/libbeat/esleg/eslegtest/util.go @@ -18,6 +18,7 @@ package eslegtest import ( + "context" "fmt" "os" ) @@ -32,20 +33,23 @@ const ( // TestLogger is used to report fatal errors to the testing framework. type TestLogger interface { Fatal(args ...interface{}) + Cleanup(f func()) } // Connectable defines the minimum interface required to initialize a connected // client. type Connectable interface { - Connect() error + Connect(context.Context) error } // InitConnection initializes a new connection if the no error value from creating the // connection instance is reported. // The test logger will be used if an error is found. 
func InitConnection(t TestLogger, conn Connectable, err error) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) if err == nil { - err = conn.Connect() + err = conn.Connect(ctx) } if err != nil { diff --git a/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go b/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go index 67b9a1cfb06d..6f81bf98a029 100644 --- a/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go +++ b/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go @@ -20,6 +20,7 @@ package lifecycle import ( + "context" "fmt" "os" "testing" @@ -141,7 +142,9 @@ func newRawESClient(t *testing.T) ESClient { t.Fatal(err) } - if err := client.Connect(); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { t.Fatalf("Failed to connect to Test Elasticsearch instance: %v", err) } diff --git a/libbeat/licenser/elastic_fetcher.go b/libbeat/licenser/elastic_fetcher.go index bcbe68a938f9..1f869d61fefb 100644 --- a/libbeat/licenser/elastic_fetcher.go +++ b/libbeat/licenser/elastic_fetcher.go @@ -18,10 +18,10 @@ package licenser import ( + "context" "encoding/json" "errors" "fmt" - "math/rand" "net/http" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" @@ -98,6 +98,7 @@ func (f *ElasticFetcher) parseJSON(b []byte) (License, error) { // esClientMux is taking care of round robin request over an array of elasticsearch client, note that // calling request is not threadsafe. +// nolint: unused // it's used on Linux type esClientMux struct { clients []eslegclient.Connection idx int @@ -107,6 +108,7 @@ type esClientMux struct { // at the end of the function call, if an error occur we return the error and will pick up the next client on the // next call. Not that we just round robin between hosts, any backoff strategy should be handled by // the consumer of this type. 
+// nolint: unused // it's used on Linux func (mux *esClientMux) Request( method, path string, pipeline string, @@ -115,7 +117,9 @@ func (mux *esClientMux) Request( ) (int, []byte, error) { c := mux.clients[mux.idx] - if err := c.Connect(); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := c.Connect(ctx); err != nil { return 0, nil, err } defer c.Close() @@ -127,19 +131,3 @@ func (mux *esClientMux) Request( } return status, response, err } - -// newESClientMux takes a list of clients and randomize where we start and the list of host we are -// querying. -func newESClientMux(clients []eslegclient.Connection) *esClientMux { - // randomize where we start - idx := rand.Intn(len(clients)) - - // randomize the list of round robin hosts. - tmp := make([]eslegclient.Connection, len(clients)) - copy(tmp, clients) - rand.Shuffle(len(tmp), func(i, j int) { - tmp[i], tmp[j] = tmp[j], tmp[i] - }) - - return &esClientMux{idx: idx, clients: tmp} -} diff --git a/libbeat/licenser/elastic_fetcher_integration_test.go b/libbeat/licenser/elastic_fetcher_integration_test.go index f303bfe0d8c9..7560ebb394d8 100644 --- a/libbeat/licenser/elastic_fetcher_integration_test.go +++ b/libbeat/licenser/elastic_fetcher_integration_test.go @@ -20,6 +20,7 @@ package licenser import ( + "context" "testing" "time" @@ -35,7 +36,7 @@ const ( elasticsearchPort = "9200" ) -func getTestClient() *eslegclient.Connection { +func getTestClient(t *testing.T) *eslegclient.Connection { transport := httpcommon.DefaultHTTPTransportSettings() transport.Timeout = 60 * time.Second @@ -47,16 +48,22 @@ func getTestClient() *eslegclient.Connection { CompressionLevel: 3, Transport: transport, }) - if err != nil { - panic(err) + t.Fatalf("cannot get new ES connection: %s", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { + t.Fatalf("cannot connect to ES: %s", err) } + return client } // 
Sanity check for schema change on the HTTP response from a live Elasticsearch instance. func TestElasticsearch(t *testing.T) { - f := NewElasticFetcher(getTestClient()) + f := NewElasticFetcher(getTestClient(t)) license, err := f.Fetch() if !assert.NoError(t, err) { return diff --git a/libbeat/licenser/elastic_fetcher_test.go b/libbeat/licenser/elastic_fetcher_test.go index 731bf5c0618f..82ca7e47ca29 100644 --- a/libbeat/licenser/elastic_fetcher_test.go +++ b/libbeat/licenser/elastic_fetcher_test.go @@ -18,7 +18,8 @@ package licenser import ( - "io/ioutil" + "context" + "fmt" "net/http" "net/http/httptest" "os" @@ -26,15 +27,41 @@ import ( "testing" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" + "github.com/elastic/beats/v7/libbeat/version" "github.com/stretchr/testify/assert" ) +func esRootHandler(w http.ResponseWriter, r *http.Request) { + respStr := fmt.Sprintf(` +{ + "name" : "582a64c35c16", + "cluster_name" : "docker-cluster", + "cluster_uuid" : "fnanWPBeSNS9KZ930Z5JmA", + "version" : { + "number" : "%s", + "build_flavor" : "default", + "build_type" : "docker", + "build_hash" : "14b7170921f2f0e4109255b83cb9af175385d87f", + "build_date" : "2024-08-23T00:26:58.284513650Z", + "build_snapshot" : true, + "lucene_version" : "9.11.1", + "minimum_wire_compatibility_version" : "7.17.0", + "minimum_index_compatibility_version" : "7.0.0" + }, + "tagline" : "You Know, for Search" +}`, version.GetDefaultVersion()) + + w.Write([]byte(respStr)) +} + func newServerClientPair(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *eslegclient.Connection) { mux := http.NewServeMux() - mux.Handle("/_license/", http.HandlerFunc(handler)) + mux.Handle("/", http.HandlerFunc(esRootHandler)) + mux.Handle("/_license/", handler) server := httptest.NewServer(mux) + t.Cleanup(server.Close) client, err := eslegclient.NewConnection(eslegclient.ConnectionSettings{ URL: server.URL, @@ -43,13 +70,19 @@ func newServerClientPair(t *testing.T, handler http.HandlerFunc) 
(*httptest.Serv t.Fatalf("could not create the elasticsearch client, error: %s", err) } + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { + t.Fatalf("cannot connect to ES: %s", err) + } + return server, client } func TestParseJSON(t *testing.T) { t.Run("OSS release of Elasticsearch (Code: 405)", func(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "Method Not Allowed", 405) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) } s, c := newServerClientPair(t, h) defer s.Close() @@ -75,7 +108,7 @@ func TestParseJSON(t *testing.T) { t.Run("malformed JSON", func(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("hello bad JSON")) + _, _ = w.Write([]byte("hello bad JSON")) } s, c := newServerClientPair(t, h) defer s.Close() @@ -88,7 +121,7 @@ func TestParseJSON(t *testing.T) { t.Run("401 response", func(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "Unauthorized", 401) + http.Error(w, "Unauthorized", http.StatusUnauthorized) } s, c := newServerClientPair(t, h) defer s.Close() @@ -113,14 +146,14 @@ func TestParseJSON(t *testing.T) { }) t.Run("200 response", func(t *testing.T) { - filepath.Walk("testdata/", func(path string, i os.FileInfo, err error) error { + _ = filepath.Walk("testdata/", func(path string, i os.FileInfo, err error) error { if i.IsDir() { return nil } t.Run(path, func(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { - json, err := ioutil.ReadFile(path) + json, err := os.ReadFile(path) if err != nil { t.Fatal("could not read JSON") } diff --git a/libbeat/monitoring/report/elasticsearch/client.go b/libbeat/monitoring/report/elasticsearch/client.go index 56f56ac8e1e3..28be1c379172 100644 --- a/libbeat/monitoring/report/elasticsearch/client.go +++ b/libbeat/monitoring/report/elasticsearch/client.go @@ -59,10 +59,10 @@ func newPublishClient( 
return p, nil } -func (c *publishClient) Connect() error { +func (c *publishClient) Connect(ctx context.Context) error { c.log.Debug("Monitoring client: connect.") - err := c.es.Connect() + err := c.es.Connect(ctx) if err != nil { return fmt.Errorf("cannot connect underlying Elasticsearch client: %w", err) } diff --git a/libbeat/monitoring/report/elasticsearch/elasticsearch.go b/libbeat/monitoring/report/elasticsearch/elasticsearch.go index da3f6135110e..61e051d12220 100644 --- a/libbeat/monitoring/report/elasticsearch/elasticsearch.go +++ b/libbeat/monitoring/report/elasticsearch/elasticsearch.go @@ -18,6 +18,7 @@ package elasticsearch import ( + "context" "errors" "io" "math/rand" @@ -214,8 +215,10 @@ func (r *reporter) initLoop(c config) { for { // Select one configured endpoint by random and check if xpack is available + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() client := r.out[rand.Intn(len(r.out))] - err := client.Connect() + err := client.Connect(ctx) if err == nil { closing(log, client) break diff --git a/libbeat/outputs/backoff.go b/libbeat/outputs/backoff.go index 3c7f8e51e107..87d94bb66d0d 100644 --- a/libbeat/outputs/backoff.go +++ b/libbeat/outputs/backoff.go @@ -45,8 +45,8 @@ func WithBackoff(client NetworkClient, init, max time.Duration) NetworkClient { } } -func (b *backoffClient) Connect() error { - err := b.client.Connect() +func (b *backoffClient) Connect(ctx context.Context) error { + err := b.client.Connect(ctx) backoff.WaitOnError(b.backoff, err) return err } diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 70c4cc1cce50..56f28cdbf30d 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -133,7 +133,7 @@ func NewClient( return nil, err } - conn.OnConnectCallback = func() error { + conn.OnConnectCallback = func(conn *eslegclient.Connection) error { globalCallbackRegistry.mutex.Lock() defer 
globalCallbackRegistry.mutex.Unlock() @@ -532,8 +532,8 @@ func (client *Client) applyItemStatus( return true } -func (client *Client) Connect() error { - return client.conn.Connect() +func (client *Client) Connect(ctx context.Context) error { + return client.conn.Connect(ctx) } func (client *Client) Close() error { diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 765fd3eec5aa..f4fb0e4f9a94 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -429,8 +429,12 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu } client := randomClient(output).(clientWrap).Client().(*Client) - // Load version number - _ = client.Connect() + // Load version ctx + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { + t.Fatalf("cannot connect to ES: %s", err) + } return client, client } diff --git a/libbeat/outputs/elasticsearch/client_proxy_test.go b/libbeat/outputs/elasticsearch/client_proxy_test.go index c2f23f34052a..bd6739c3bf02 100644 --- a/libbeat/outputs/elasticsearch/client_proxy_test.go +++ b/libbeat/outputs/elasticsearch/client_proxy_test.go @@ -22,6 +22,7 @@ package elasticsearch import ( "bytes" + "context" "fmt" "net/http" "net/http/httptest" @@ -209,10 +210,12 @@ func doClientPing(t *testing.T) { client, err := NewClient(clientSettings, nil) require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) // This ping won't succeed; we aren't testing end-to-end communication // (which would require a lot more setup work), we just want to make sure // the client is pointed at the right server or proxy. 
- _ = client.Connect() + _ = client.Connect(ctx) } // serverState contains the state of the http listeners for proxy tests, diff --git a/libbeat/outputs/elasticsearch/client_test.go b/libbeat/outputs/elasticsearch/client_test.go index 5124c0defe9d..abda06a02ee1 100644 --- a/libbeat/outputs/elasticsearch/client_test.go +++ b/libbeat/outputs/elasticsearch/client_test.go @@ -748,8 +748,10 @@ func TestClientWithHeaders(t *testing.T) { }, nil) assert.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) // simple ping - err = client.Connect() + err = client.Connect(ctx) assert.NoError(t, err) assert.Equal(t, 1, requestCount) @@ -943,11 +945,13 @@ func TestClientWithAPIKey(t *testing.T) { }, nil) assert.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) // This connection will fail since the server doesn't return a valid // response. This is fine since we're just testing the headers in the // original client request. 
//nolint:errcheck // connection doesn't need to succeed - client.Connect() + client.Connect(ctx) assert.Equal(t, "ApiKey aHlva0hHNEJmV2s1dmlLWjE3Mlg6bzQ1SlVreXVTLS15aVNBdXV4bDhVdw==", headers.Get("Authorization")) } diff --git a/libbeat/outputs/failover.go b/libbeat/outputs/failover.go index 3e999e8321f2..d69e01b03ccd 100644 --- a/libbeat/outputs/failover.go +++ b/libbeat/outputs/failover.go @@ -54,7 +54,7 @@ func NewFailoverClient(clients []NetworkClient) NetworkClient { } } -func (f *failoverClient) Connect() error { +func (f *failoverClient) Connect(ctx context.Context) error { var ( next int active = f.active @@ -82,7 +82,7 @@ func (f *failoverClient) Connect() error { client := f.clients[next] f.active = next - return client.Connect() + return client.Connect(ctx) } func (f *failoverClient) Close() error { diff --git a/libbeat/outputs/logstash/async.go b/libbeat/outputs/logstash/async.go index b1e20a0e7749..a980d1cef32c 100644 --- a/libbeat/outputs/logstash/async.go +++ b/libbeat/outputs/logstash/async.go @@ -91,7 +91,7 @@ func newAsyncClient( } c.connect = func() error { - err := c.Client.Connect() + err := c.Client.ConnectContext(context.Background()) if err == nil { c.client, err = clientFactory(c.Client) } @@ -116,7 +116,7 @@ func makeClientFactory( } } -func (c *asyncClient) Connect() error { +func (c *asyncClient) Connect(ctx context.Context) error { c.log.Debug("connect") return c.connect() } diff --git a/libbeat/outputs/logstash/async_test.go b/libbeat/outputs/logstash/async_test.go index 6e2a102edf26..12d2edd124c4 100644 --- a/libbeat/outputs/logstash/async_test.go +++ b/libbeat/outputs/logstash/async_test.go @@ -72,6 +72,8 @@ func newAsyncTestDriver(client outputs.NetworkClient) *testAsyncDriver { go func() { defer driver.wg.Done() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for { cmd, ok := <-driver.ch if !ok { @@ -82,7 +84,7 @@ func newAsyncTestDriver(client outputs.NetworkClient) *testAsyncDriver { case driverCmdQuit: 
return case driverCmdConnect: - driver.client.Connect() + driver.client.Connect(ctx) case driverCmdClose: driver.client.Close() case driverCmdPublish: diff --git a/libbeat/outputs/logstash/logstash_integration_test.go b/libbeat/outputs/logstash/logstash_integration_test.go index 442145835dfd..286717e49ede 100644 --- a/libbeat/outputs/logstash/logstash_integration_test.go +++ b/libbeat/outputs/logstash/logstash_integration_test.go @@ -115,6 +115,11 @@ func esConnect(t *testing.T, index string) *esConnection { Password: password, Transport: transport, }) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { + t.Fatalf("cannot connect to LS: %s:", err) + } if err != nil { t.Fatal(err) } @@ -207,7 +212,9 @@ func newTestElasticsearchOutput(t *testing.T, test string) *testOutputer { // The Elasticsearch output requires events to be encoded // before calling Publish, so create an event encoder. es.encoder = grp.EncoderFactory() - es.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + es.Connect(ctx) return es } diff --git a/libbeat/outputs/logstash/logstash_test.go b/libbeat/outputs/logstash/logstash_test.go index fa1b57fb841b..5be2054cf2a2 100644 --- a/libbeat/outputs/logstash/logstash_test.go +++ b/libbeat/outputs/logstash/logstash_test.go @@ -116,7 +116,9 @@ func testConnectionType( output := makeOutputer() t.Logf("new outputter: %v", output) - err := output.Connect() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := output.Connect(ctx) if err != nil { t.Error("test client failed to connect: ", err) return @@ -186,8 +188,10 @@ func newTestLumberjackOutput( t.Fatalf("init logstash output plugin failed: %v", err) } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() client := grp.Clients[0].(outputs.NetworkClient) - if err := client.Connect(); err != nil { + if err := client.Connect(ctx); err != nil { 
t.Fatalf("Client failed to connected: %v", err) } diff --git a/libbeat/outputs/logstash/sync.go b/libbeat/outputs/logstash/sync.go index d24ab1ebb97d..6a4569073650 100644 --- a/libbeat/outputs/logstash/sync.go +++ b/libbeat/outputs/logstash/sync.go @@ -74,9 +74,9 @@ func newSyncClient( return c, nil } -func (c *syncClient) Connect() error { +func (c *syncClient) Connect(ctx context.Context) error { c.log.Debug("connect") - err := c.Client.Connect() + err := c.Client.ConnectContext(ctx) if err != nil { return err } diff --git a/libbeat/outputs/logstash/sync_test.go b/libbeat/outputs/logstash/sync_test.go index d0410c2a8a72..0d8a3e0f5132 100644 --- a/libbeat/outputs/logstash/sync_test.go +++ b/libbeat/outputs/logstash/sync_test.go @@ -86,6 +86,8 @@ func newClientTestDriver(client outputs.NetworkClient) *testSyncDriver { go func() { defer driver.wg.Done() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for { cmd, ok := <-driver.ch if !ok { @@ -96,7 +98,7 @@ func newClientTestDriver(client outputs.NetworkClient) *testSyncDriver { case driverCmdQuit: return case driverCmdConnect: - driver.client.Connect() + driver.client.Connect(ctx) case driverCmdClose: driver.client.Close() case driverCmdPublish: diff --git a/libbeat/outputs/outputs.go b/libbeat/outputs/outputs.go index 0fdf4d9407b6..3cfdb5aef66b 100644 --- a/libbeat/outputs/outputs.go +++ b/libbeat/outputs/outputs.go @@ -57,5 +57,5 @@ type Connectable interface { // The connection attempt shall report an error if no connection could been // established within the given time interval. A timeout value of 0 == wait // forever. 
- Connect() error + Connect(context.Context) error } diff --git a/libbeat/outputs/redis/backoff.go b/libbeat/outputs/redis/backoff.go index ef3dcd7cc48b..2abc1f846f0a 100644 --- a/libbeat/outputs/redis/backoff.go +++ b/libbeat/outputs/redis/backoff.go @@ -60,7 +60,7 @@ func newBackoffClient(client *client, init, max time.Duration) *backoffClient { } } -func (b *backoffClient) Connect() error { +func (b *backoffClient) Connect(ctx context.Context) error { err := b.client.Connect() if err != nil { // give the client a chance to promote an internal error to a network error. diff --git a/libbeat/outputs/redis/redis_integration_test.go b/libbeat/outputs/redis/redis_integration_test.go index dfd48dc75d23..6fd3e09397ab 100644 --- a/libbeat/outputs/redis/redis_integration_test.go +++ b/libbeat/outputs/redis/redis_integration_test.go @@ -336,7 +336,9 @@ func newRedisTestingOutput(t *testing.T, cfg map[string]interface{}) outputs.Cli } client := out.Clients[0].(outputs.NetworkClient) - if err := client.Connect(); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { t.Fatalf("Failed to connect to redis host: %v", err) } diff --git a/libbeat/publisher/pipeline/client_worker.go b/libbeat/publisher/pipeline/client_worker.go index e05658d97499..3e6b8202dd21 100644 --- a/libbeat/publisher/pipeline/client_worker.go +++ b/libbeat/publisher/pipeline/client_worker.go @@ -29,8 +29,8 @@ import ( ) type worker struct { - qu chan publisher.Batch - done chan struct{} + qu chan publisher.Batch + cancel func() } // clientWorker manages output client of type outputs.Client, not supporting reconnect. 
@@ -50,14 +50,15 @@ type netClientWorker struct { } func makeClientWorker(qu chan publisher.Batch, client outputs.Client, logger logger, tracer *apm.Tracer) outputWorker { + ctx, cancel := context.WithCancel(context.Background()) w := worker{ - qu: qu, - done: make(chan struct{}), + qu: qu, + cancel: cancel, } var c interface { outputWorker - run() + run(context.Context) } if nc, ok := client.(outputs.NetworkClient); ok { @@ -71,12 +72,12 @@ func makeClientWorker(qu chan publisher.Batch, client outputs.Client, logger log c = &clientWorker{worker: w, client: client} } - go c.run() + go c.run(ctx) return c } func (w *worker) close() { - close(w.done) + w.cancel() } func (w *clientWorker) Close() error { @@ -84,20 +85,20 @@ func (w *clientWorker) Close() error { return w.client.Close() } -func (w *clientWorker) run() { +func (w *clientWorker) run(ctx context.Context) { for { // We wait for either the worker to be closed or for there to be a batch of // events to publish. select { - case <-w.done: + case <-ctx.Done(): return case batch := <-w.qu: if batch == nil { continue } - if err := w.client.Publish(context.TODO(), batch); err != nil { + if err := w.client.Publish(ctx, batch); err != nil { return } } @@ -109,7 +110,7 @@ func (w *netClientWorker) Close() error { return w.client.Close() } -func (w *netClientWorker) run() { +func (w *netClientWorker) run(ctx context.Context) { var ( connected = false reconnectAttempts = 0 @@ -120,7 +121,7 @@ func (w *netClientWorker) run() { // events to publish. 
select { - case <-w.done: + case <-ctx.Done(): return case batch := <-w.qu: @@ -139,7 +140,7 @@ func (w *netClientWorker) run() { w.logger.Infof("Attempting to reconnect to %v with %d reconnect attempt(s)", w.client, reconnectAttempts) } - err := w.client.Connect() + err := w.client.Connect(ctx) connected = err == nil if connected { w.logger.Infof("Connection to %v established", w.client) @@ -152,15 +153,14 @@ func (w *netClientWorker) run() { continue } - if err := w.publishBatch(batch); err != nil { + if err := w.publishBatch(ctx, batch); err != nil { connected = false } } } } -func (w *netClientWorker) publishBatch(batch publisher.Batch) error { - ctx := context.Background() +func (w *netClientWorker) publishBatch(ctx context.Context, batch publisher.Batch) error { if w.tracer != nil && w.tracer.Recording() { tx := w.tracer.StartTransaction("publish", "output") defer tx.End() diff --git a/libbeat/publisher/pipeline/testing.go b/libbeat/publisher/pipeline/testing.go index ca357646a81e..61977377a75b 100644 --- a/libbeat/publisher/pipeline/testing.go +++ b/libbeat/publisher/pipeline/testing.go @@ -54,7 +54,7 @@ type mockNetworkClient struct { outputs.Client } -func (c *mockNetworkClient) Connect() error { return nil } +func (c *mockNetworkClient) Connect(_ context.Context) error { return nil } type mockBatch struct { mu sync.Mutex diff --git a/libbeat/template/load_integration_test.go b/libbeat/template/load_integration_test.go index b3aafad5d692..4705f9be5a86 100644 --- a/libbeat/template/load_integration_test.go +++ b/libbeat/template/load_integration_test.go @@ -20,6 +20,7 @@ package template import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -66,7 +67,9 @@ func newTestSetup(t *testing.T, cfg TemplateConfig) *testSetup { cfg.Name = fmt.Sprintf("load-test-%+v", rand.Int()) } client := getTestingElasticsearch(t) - if err := client.Connect(); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := 
client.Connect(ctx); err != nil { t.Fatal(err) } handler := &mockClientHandler{serverless: false, mode: lifecycle.ILM} @@ -554,7 +557,9 @@ func getTestingElasticsearch(t eslegtest.TestLogger) *eslegclient.Connection { conn.Encoder = eslegclient.NewJSONEncoder(nil, false) - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = conn.Connect(ctx) if err != nil { t.Fatal(err) panic(err) // panic in case TestLogger did not stop test @@ -586,7 +591,9 @@ func getMockElasticsearchClient(t *testing.T, method, endpoint string, code int, Transport: httpcommon.DefaultHTTPTransportSettings(), }) require.NoError(t, err) - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = conn.Connect(ctx) require.NoError(t, err) return conn } diff --git a/libbeat/tests/integration/elasticsearch_test.go b/libbeat/tests/integration/elasticsearch_test.go new file mode 100644 index 000000000000..6d8d1a46a080 --- /dev/null +++ b/libbeat/tests/integration/elasticsearch_test.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "errors" + "io" + "net/http" + "testing" + "time" + + "github.com/gofrs/uuid/v5" + "github.com/rcrowley/go-metrics" + "github.com/stretchr/testify/require" + + "github.com/elastic/mock-es/pkg/api" +) + +var esCfg = ` +mockbeat: +logging: + level: debug + selectors: + - publisher_pipeline_output + - esclientleg +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.elasticsearch: + allow_older_versions: true + hosts: + - "http://localhost:4242" + backoff: + init: 0.1s + max: 0.2s +` + +func TestESOutputRecoversFromNetworkError(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(esCfg) + + s, mr := startMockES(t, "localhost:4242") + + mockbeat.Start() + + // 1. Wait for one _bulk call + waitForEventToBePublished(t, mr) + + // 2. Stop the mock-es server + if err := s.Close(); err != nil { + t.Fatalf("cannot close mock-es server: %s", err) + } + + // 3. Wait for connection error logs + mockbeat.WaitForLogs( + `Get \"http://localhost:4242\": dial tcp 127.0.0.1:4242: connect: connection refused`, + 2*time.Second, + "did not find connection refused error") + + mockbeat.WaitForLogs( + "Attempting to reconnect to backoff(elasticsearch(http://localhost:4242)) with 2 reconnect attempt(s)", + 2*time.Second, + "did not find two tries to reconnect") + + // 4. Restart mock-es on the same port + s, mr = startMockES(t, "localhost:4242") + + // 5. Wait for reconnection logs + mockbeat.WaitForLogs( + "Connection to backoff(elasticsearch(http://localhost:4242)) established", + 5*time.Second, // There is a backoff, so ensure we wait enough + "did not find re connection confirmation") + + // 6. 
Ensure one new call to _bulk is made + waitForEventToBePublished(t, mr) + s.Close() +} + +func startMockES(t *testing.T, addr string) (*http.Server, metrics.Registry) { + uid := uuid.Must(uuid.NewV4()) + mr := metrics.NewRegistry() + es := api.NewAPIHandler(uid, "foo2", mr, time.Now().Add(24*time.Hour), 0, 0, 0, 0, 0) + + s := http.Server{Addr: addr, Handler: es, ReadHeaderTimeout: time.Second} + go func() { + if err := s.ListenAndServe(); !errors.Is(http.ErrServerClosed, err) { + t.Errorf("could not start mock-es server: %s", err) + } + }() + + require.Eventually(t, func() bool { + resp, err := http.Get("http://" + addr) //nolint: noctx // It's just a test + if err != nil { + //nolint: errcheck // We're just draining the body, we can ignore the error + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + return false + } + return true + }, + time.Second, time.Millisecond, "mock-es server did not start on '%s'", addr) + + return &s, mr +} + +// waitForEventToBePublished waits for at least one event published +// by inspecting the count for `bulk.create.total` in `mr`. Once +// the counter is > 1, waitForEventToBePublished returns. If that +// does not happen within 10min, then the test fails with a call to +// t.Fatal. 
+func waitForEventToBePublished(t *testing.T, mr metrics.Registry) { + t.Helper() + require.Eventually(t, func() bool { + total := mr.Get("bulk.create.total") + if total == nil { + return false + } + + sc, ok := total.(*metrics.StandardCounter) + if !ok { + t.Fatalf("expecting 'bulk.create.total' to be *metrics.StandardCounter, but got '%T' instead", + total, + ) + } + + return sc.Count() > 1 + }, + 10*time.Second, 100*time.Millisecond, + "at least one bulk request must be made") +} diff --git a/packetbeat/beater/packetbeat.go b/packetbeat/beater/packetbeat.go index 6495a7333797..e12573f84060 100644 --- a/packetbeat/beater/packetbeat.go +++ b/packetbeat/beater/packetbeat.go @@ -18,6 +18,7 @@ package beater import ( + "context" "flag" "fmt" "sync" @@ -111,7 +112,9 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) { } overwritePipelines = config.OverwritePipelines b.OverwritePipelinesCallback = func(esConfig *conf.C) error { - esClient, err := eslegclient.NewConnectedClient(esConfig, "Packetbeat") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + esClient, err := eslegclient.NewConnectedClient(ctx, esConfig, "Packetbeat") if err != nil { return err } diff --git a/winlogbeat/beater/winlogbeat.go b/winlogbeat/beater/winlogbeat.go index e41aa54cb7f3..4e6b2b3657d5 100644 --- a/winlogbeat/beater/winlogbeat.go +++ b/winlogbeat/beater/winlogbeat.go @@ -108,7 +108,9 @@ func (eb *Winlogbeat) init(b *beat.Beat) error { } b.OverwritePipelinesCallback = func(esConfig *conf.C) error { overwritePipelines := config.OverwritePipelines - esClient, err := eslegclient.NewConnectedClient(esConfig, "Winlogbeat") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + esClient, err := eslegclient.NewConnectedClient(ctx, esConfig, "Winlogbeat") if err != nil { return err } diff --git a/x-pack/winlogbeat/module/testing.go b/x-pack/winlogbeat/module/testing.go index 3dc628b80a95..f1d38fceac82 100644 --- 
a/x-pack/winlogbeat/module/testing.go +++ b/x-pack/winlogbeat/module/testing.go @@ -5,6 +5,7 @@ package module import ( + "context" "encoding/json" "flag" "fmt" @@ -105,7 +106,9 @@ func testIngestPipeline(t *testing.T, pipeline, pattern string, p *params) { } defer conn.Close() - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = conn.Connect(ctx) if err != nil { t.Fatalf("unexpected error making connection: %v", err) } diff --git a/x-pack/winlogbeat/module/wintest/docker_test.go b/x-pack/winlogbeat/module/wintest/docker_test.go index e45826f3b081..db7ab341a277 100644 --- a/x-pack/winlogbeat/module/wintest/docker_test.go +++ b/x-pack/winlogbeat/module/wintest/docker_test.go @@ -82,7 +82,9 @@ func TestDocker(t *testing.T) { } defer conn.Close() - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = conn.Connect(ctx) if err != nil { t.Fatalf("unexpected error making connection: %v", err) } diff --git a/x-pack/winlogbeat/module/wintest/simulate_test.go b/x-pack/winlogbeat/module/wintest/simulate_test.go index 1bda1d5fb17c..b54d12f1d961 100644 --- a/x-pack/winlogbeat/module/wintest/simulate_test.go +++ b/x-pack/winlogbeat/module/wintest/simulate_test.go @@ -11,6 +11,7 @@ package wintest_test import ( + "context" "encoding/json" "fmt" "os" @@ -72,7 +73,9 @@ func TestSimulate(t *testing.T) { } defer conn.Close() - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = conn.Connect(ctx) if err != nil { t.Fatalf("unexpected error making connection: %v", err) } From a54747368bde3307bac377a62b8588d746db5d79 Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Tue, 29 Oct 2024 09:36:12 +0200 Subject: [PATCH 88/90] Awscloudwatchtags (#41388) * adding fix for aws tags of cloudwatch * updating docs * Update x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go Co-authored-by: kaiyan-sheng * Update 
x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go Co-authored-by: kaiyan-sheng * adding a validation for the LimitRestAPI * setting to max limit to 500 * setting to max limit to 500 * removing uneeded sdk --------- Co-authored-by: kaiyan-sheng --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 436 +++++++++++++++++- go.mod | 8 +- go.sum | 16 +- metricbeat/docs/modules/aws.asciidoc | 19 + .../metricbeat/module/aws/_meta/docs.asciidoc | 19 + x-pack/metricbeat/module/aws/aws.go | 13 + .../module/aws/cloudwatch/cloudwatch.go | 103 ++++- .../module/aws/cloudwatch/cloudwatch_test.go | 27 +- x-pack/metricbeat/module/aws/utils.go | 50 ++ 10 files changed, 653 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c1084282c1b3..797db99b7ca4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -362,6 +362,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add new metricset cluster for the vSphere module. {pull}40536[40536] - Add new metricset network for the vSphere module. {pull}40559[40559] - Add new metricset resourcepool for the vSphere module. {pull}40456[40456] +- Add AWS Cloudwatch capability to retrieve tags from AWS/ApiGateway resources {pull}40755[40755] - Add new metricset datastorecluster for vSphere module. {pull}40634[40634] - Add support for new metrics in datastorecluster metricset. {pull}40694[40694] - Add new metrics for the vSphere Virtualmachine metricset. 
{pull}40485[40485] diff --git a/NOTICE.txt b/NOTICE.txt index dcd146fe27a1..ca8f0a435085 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -5063,11 +5063,11 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-lambda-go@v1.44 -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-sdk-go-v2 -Version: v1.30.4 +Version: v1.30.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2@v1.30.4/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2@v1.30.5/LICENSE.txt: Apache License @@ -6121,6 +6121,430 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/featu limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go-v2/service/apigateway +Version: v1.25.8 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/apigateway@v1.25.8/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go-v2/service/apigatewayv2 +Version: v1.22.8 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/apigatewayv2@v1.22.8/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-sdk-go-v2/service/cloudformation Version: v1.53.5 @@ -34993,11 +35417,11 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/aws/p -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-sdk-go-v2/internal/configsources -Version: v1.3.16 +Version: v1.3.17 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.16/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.17/LICENSE.txt: Apache License @@ -35205,11 +35629,11 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/inter -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -Version: v2.6.16 +Version: v2.6.17 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.16/LICENSE.txt: +Contents of probable licence file 
$GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.17/LICENSE.txt: Apache License diff --git a/go.mod b/go.mod index fe7f8ea884d2..3e2fe304b676 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 // indirect github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/aws/aws-lambda-go v1.44.0 - github.com/aws/aws-sdk-go-v2 v1.30.4 + github.com/aws/aws-sdk-go-v2 v1.30.5 github.com/aws/aws-sdk-go-v2/config v1.27.29 github.com/aws/aws-sdk-go-v2/credentials v1.17.29 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.5 @@ -179,6 +179,8 @@ require ( github.com/apache/arrow/go/v14 v14.0.2 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.13 + github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.8 + github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.8 github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.5 github.com/aws/aws-sdk-go-v2/service/health v1.26.4 github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 @@ -257,8 +259,8 @@ require ( github.com/apache/thrift v0.19.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect diff --git a/go.sum b/go.sum index 124206af0401..e27981da519d 100644 --- a/go.sum +++ b/go.sum @@ -159,8 +159,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY 
github.com/aws/aws-lambda-go v1.44.0 h1:Xp9PANXKsSJ23IhE4ths592uWTCEewswPhSH9qpAuQQ= github.com/aws/aws-lambda-go v1.44.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= -github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= +github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= github.com/aws/aws-sdk-go-v2/config v1.27.29 h1:+ZPKb3u9Up4KZWLGTtpTmC5T3XmRD1ZQ8XQjRCHUvJw= @@ -171,14 +171,18 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.13 h1:X8EeaOjl91c8sP14NG8EHx5ZxXLJg0tHDp+KQSghp28= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.13/go.mod h1:kEI/h2bETfm09LSd7xEEH2qcU1cd//+5HH4Le7p9JgY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod 
h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 h1:mimdLQkIX1zr8GIPY1ZtALdBQGxcASiBd2MOp8m/dMc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.8 h1:CgEyY7gfTf7lHYcCi7+w6jJ1XQBugjpadtsuN3TGxdQ= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.8/go.mod h1:z99ur4Ha5540t8hb5XtqV/UMOnEoEZK22lhr5ZBS0zw= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.8 h1:SWBNBbVbThg5Hdi3hWbVaDFjV/OyPbuqZLu4N+mj/Es= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.8/go.mod h1:lz2IT8gzzSwao0Pa6uMSdCIPsprmgCkW83q6sHGZFDw= github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.5 h1:YeTVIy7cJLeahs7K0jQGDGAd1YYND/to/z8N3kqZBhY= github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.5/go.mod h1:y45SdA9v+dLlweaqwAQMoFeXqdRvgwevafa2X8iTqZQ= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.5 h1:/YvqO1j75i4leoV+Z3a5s/dAlEszf2wTKBW8jc3Gd4s= diff --git a/metricbeat/docs/modules/aws.asciidoc b/metricbeat/docs/modules/aws.asciidoc index 0ee7f601052f..291e2b7c09b8 100644 --- a/metricbeat/docs/modules/aws.asciidoc +++ b/metricbeat/docs/modules/aws.asciidoc @@ -146,6 +146,25 @@ Enforces the use of FIPS service endpoints. See < 500 { + base.Logger().Debug("apigateway_max_results config value can not exceed value 500. Setting apigateway_max_results=500") + *config.LimitRestAPI = 500 + } else if *config.LimitRestAPI <= 0 { + base.Logger().Debug("apigateway_max_results config value can not be <=0. 
Setting apigateway_max_results=25") + *config.LimitRestAPI = 25 + } + } + // Construct MetricSet with a full regions list if config.Regions == nil { svcEC2 := ec2.NewFromConfig(awsConfig, func(o *ec2.Options) { diff --git a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go index ed043e8c38f1..355c6710093c 100644 --- a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go @@ -6,12 +6,15 @@ package cloudwatch import ( "fmt" + "maps" "reflect" "strconv" "strings" "time" awssdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/apigateway" + "github.com/aws/aws-sdk-go-v2/service/apigatewayv2" "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi" @@ -23,13 +26,25 @@ import ( "github.com/elastic/elastic-agent-libs/logp" ) +const checkns = "AWS/ApiGateway" +const checkresource_type = "apigateway:restapis" + var ( - metricsetName = "cloudwatch" - defaultStatistics = []string{"Average", "Maximum", "Minimum", "Sum", "SampleCount"} - dimensionSeparator = "," - dimensionValueWildcard = "*" + metricsetName = "cloudwatch" + defaultStatistics = []string{"Average", "Maximum", "Minimum", "Sum", "SampleCount"} + dimensionSeparator = "," + dimensionValueWildcard = "*" + checkns_lower = strings.ToLower(checkns) + checkresource_type_lower = strings.ToLower(checkresource_type) ) +type APIClients struct { + CloudWatchClient *cloudwatch.Client + Resourcegroupstaggingapi *resourcegroupstaggingapi.Client + Apigateway *apigateway.Client + Apigatewayv2 *apigatewayv2.Client +} + // init registers the MetricSet with the central registry as soon as the program // starts. The New function will be called later to instantiate an instance of // the MetricSet for each host defined in the module's configuration. 
After the @@ -123,7 +138,8 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.Period, m.Latency, m.PreviousEndTime) m.PreviousEndTime = endTime m.Logger().Debugf("startTime = %s, endTime = %s", startTime, endTime) - + // Initialise the map that will be used in case APIGateway api is configured. Infoapi includes Name_of_API:ID_of_API entries + infoapi := make(map[string]string) // Check statistic method in config err := m.checkStatistics() if err != nil { @@ -147,13 +163,12 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { m.logger.Debugf("Collecting metrics from AWS region %s", regionName) beatsConfig := m.MetricSet.AwsConfig.Copy() beatsConfig.Region = regionName - - svcCloudwatch, svcResourceAPI, err := m.createAwsRequiredClients(beatsConfig, regionName, config) + APIClients, err := m.createAwsRequiredClients(beatsConfig, regionName, config) if err != nil { m.Logger().Warn("skipping metrics list from region '%s'", regionName) } - eventsWithIdentifier, err := m.createEvents(svcCloudwatch, svcResourceAPI, listMetricDetailTotal.metricsWithStats, listMetricDetailTotal.resourceTypeFilters, regionName, startTime, endTime) + eventsWithIdentifier, err := m.createEvents(APIClients.CloudWatchClient, APIClients.Resourcegroupstaggingapi, listMetricDetailTotal.metricsWithStats, listMetricDetailTotal.resourceTypeFilters, infoapi, regionName, startTime, endTime) if err != nil { return fmt.Errorf("createEvents failed for region %s: %w", regionName, err) } @@ -173,7 +188,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { beatsConfig := m.MetricSet.AwsConfig.Copy() beatsConfig.Region = regionName - svcCloudwatch, svcResourceAPI, err := m.createAwsRequiredClients(beatsConfig, regionName, config) + APIClients, err := m.createAwsRequiredClients(beatsConfig, regionName, config) if err != nil { m.Logger().Warn("skipping metrics list from region '%s'", regionName, err) continue @@ -183,13 +198,13 @@ 
func (m *MetricSet) Fetch(report mb.ReporterV2) error { // otherwise only retrieve metrics from the specific namespaces from the config var listMetricsOutput []aws.MetricWithID if len(namespaceDetailTotal) == 0 { - listMetricsOutput, err = aws.GetListMetricsOutput("*", regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, svcCloudwatch) + listMetricsOutput, err = aws.GetListMetricsOutput("*", regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, APIClients.CloudWatchClient) if err != nil { m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, "*", err) } } else { for namespace := range namespaceDetailTotal { - listMetricsOutputPerNamespace, err := aws.GetListMetricsOutput(namespace, regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, svcCloudwatch) + listMetricsOutputPerNamespace, err := aws.GetListMetricsOutput(namespace, regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, APIClients.CloudWatchClient) if err != nil { m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, namespace, err) } @@ -203,14 +218,50 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { for namespace, namespaceDetails := range namespaceDetailTotal { m.logger.Debugf("Collected metrics from namespace %s", namespace) - // filter listMetricsOutput by detailed configuration per each namespace filteredMetricWithStatsTotal := filterListMetricsOutput(listMetricsOutput, namespace, namespaceDetails) // get resource type filters and tags filters for each namespace resourceTypeTagFilters := constructTagsFilters(namespaceDetails) - eventsWithIdentifier, err := m.createEvents(svcCloudwatch, svcResourceAPI, filteredMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime) + //Check whether namespace is APIGW + if 
strings.Contains(strings.ToLower(namespace), checkns_lower) { + useonlyrest := false + if len(resourceTypeTagFilters) == 1 { + for key := range resourceTypeTagFilters { + if strings.Compare(strings.ToLower(key), checkresource_type_lower) == 0 { + useonlyrest = true + } + } + } + // inforestapi includes only Rest APIs + if useonlyrest { + infoapi, err = aws.GetAPIGatewayRestAPIOutput(APIClients.Apigateway, config.LimitRestAPI) + if err != nil { + m.Logger().Errorf("could not get rest apis output: %v", err) + } + } else { + // infoapi includes only Rest APIs + // apiGatewayAPI includes only WebSocket and HTTP APIs + infoapi, err = aws.GetAPIGatewayRestAPIOutput(APIClients.Apigateway, config.LimitRestAPI) + if err != nil { + m.Logger().Errorf("could not get rest apis output: %v", err) + } + + apiGatewayAPI, err := aws.GetAPIGatewayAPIOutput(APIClients.Apigatewayv2) + if err != nil { + m.Logger().Errorf("could not get http and websocket apis output: %v", err) + } + if len(apiGatewayAPI) > 0 { + maps.Copy(infoapi, apiGatewayAPI) + } + + } + + m.Logger().Debugf("infoapi response: %v", infoapi) + + } + eventsWithIdentifier, err := m.createEvents(APIClients.CloudWatchClient, APIClients.Resourcegroupstaggingapi, filteredMetricWithStatsTotal, resourceTypeTagFilters, infoapi, regionName, startTime, endTime) if err != nil { return fmt.Errorf("createEvents failed for region %s: %w", regionName, err) } @@ -233,23 +284,32 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { } // createAwsRequiredClients will return the two necessary client instances to do Metric requests to the AWS API -func (m *MetricSet) createAwsRequiredClients(beatsConfig awssdk.Config, regionName string, config aws.Config) (*cloudwatch.Client, *resourcegroupstaggingapi.Client, error) { +func (m *MetricSet) createAwsRequiredClients(beatsConfig awssdk.Config, regionName string, config aws.Config) (APIClients, error) { m.logger.Debugf("Collecting metrics from AWS region %s", regionName) - 
svcCloudwatchClient := cloudwatch.NewFromConfig(beatsConfig, func(o *cloudwatch.Options) { + APIClients := APIClients{} + APIClients.CloudWatchClient = cloudwatch.NewFromConfig(beatsConfig, func(o *cloudwatch.Options) { if config.AWSConfig.FIPSEnabled { o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled } }) - svcResourceAPIClient := resourcegroupstaggingapi.NewFromConfig(beatsConfig, func(o *resourcegroupstaggingapi.Options) { + APIClients.Resourcegroupstaggingapi = resourcegroupstaggingapi.NewFromConfig(beatsConfig, func(o *resourcegroupstaggingapi.Options) { if config.AWSConfig.FIPSEnabled { o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled } }) - return svcCloudwatchClient, svcResourceAPIClient, nil + APIClients.Apigateway = apigateway.NewFromConfig(beatsConfig, func(o *apigateway.Options) { + + }) + + APIClients.Apigatewayv2 = apigatewayv2.NewFromConfig(beatsConfig, func(o *apigatewayv2.Options) { + + }) + + return APIClients, nil } // filterListMetricsOutput compares config details with listMetricsOutput and filter out the ones don't match @@ -470,7 +530,7 @@ func insertRootFields(event mb.Event, metricValue float64, labels []string) mb.E return event } -func (m *MetricSet) createEvents(svcCloudwatch cloudwatch.GetMetricDataAPIClient, svcResourceAPI resourcegroupstaggingapi.GetResourcesAPIClient, listMetricWithStatsTotal []metricsWithStatistics, resourceTypeTagFilters map[string][]aws.Tag, regionName string, startTime time.Time, endTime time.Time) (map[string]mb.Event, error) { +func (m *MetricSet) createEvents(svcCloudwatch cloudwatch.GetMetricDataAPIClient, svcResourceAPI resourcegroupstaggingapi.GetResourcesAPIClient, listMetricWithStatsTotal []metricsWithStatistics, resourceTypeTagFilters map[string][]aws.Tag, infoAPImap map[string]string, regionName string, startTime time.Time, endTime time.Time) (map[string]mb.Event, error) { // Initialize events for each identifier. 
events := make(map[string]mb.Event) @@ -580,6 +640,13 @@ func (m *MetricSet) createEvents(svcCloudwatch cloudwatch.GetMetricDataAPIClient // And tags are only store under s3BucketName in resourceTagMap. subIdentifiers := strings.Split(identifierValue, dimensionSeparator) for _, subIdentifier := range subIdentifiers { + + if len(infoAPImap) > 0 { // If infoAPImap includes data + if valAPIName, ok := infoAPImap[subIdentifier]; ok { + subIdentifier = valAPIName + } + } + if _, ok := events[uniqueIdentifierValue]; !ok { // when tagsFilter is not empty but no entry in // resourceTagMap for this identifier, do not initialize diff --git a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go index 08f878f9bb39..45b250c4f769 100644 --- a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go-v2/service/apigateway" cloudwatchtypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" resourcegroupstaggingapitypes "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types" "github.com/aws/smithy-go/middleware" @@ -1255,6 +1256,14 @@ func (m *MockResourceGroupsTaggingClient) GetResources(context.Context, *resourc }, nil } +// MockResourceGroupsTaggingClient2 is used for unit tests. +type MockResourceGroupsTaggingClient2 struct{} + +// GetResources implements resourcegroupstaggingapi.GetResourcesAPIClient. 
+func (m *MockResourceGroupsTaggingClient2) GetResources(context.Context, *apigateway.GetResourcesInput, ...func(*apigateway.Options)) (*apigateway.GetResourcesOutput, error) { + return &apigateway.GetResourcesOutput{}, nil +} + func TestCreateEventsWithIdentifier(t *testing.T) { m := MetricSet{} m.CloudwatchConfigs = []Config{{Statistic: []string{"Average"}}} @@ -1262,6 +1271,8 @@ func TestCreateEventsWithIdentifier(t *testing.T) { m.logger = logp.NewLogger("test") mockTaggingSvc := &MockResourceGroupsTaggingClient{} + infoAPImap := make(map[string]string) + mockCloudwatchSvc := &MockCloudWatchClient{} listMetricWithStatsTotal := []metricsWithStatistics{{ listMetric1, @@ -1272,7 +1283,7 @@ func TestCreateEventsWithIdentifier(t *testing.T) { var previousEndTime time.Time startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime) - events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime) + events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime) assert.NoError(t, err) assert.Equal(t, 1, len(events)) @@ -1293,6 +1304,7 @@ func TestCreateEventsWithoutIdentifier(t *testing.T) { mockTaggingSvc := &MockResourceGroupsTaggingClient{} mockCloudwatchSvc := &MockCloudWatchClientWithoutDim{} + infoAPImap := make(map[string]string) listMetricWithStatsTotal := []metricsWithStatistics{ { cloudwatchMetric: aws.MetricWithID{ @@ -1318,7 +1330,7 @@ func TestCreateEventsWithoutIdentifier(t *testing.T) { var previousEndTime time.Time startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime) - events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime) + events, err := 
m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime) assert.NoError(t, err) expectedID := " " + " " + regionName + accountID + namespace @@ -1339,6 +1351,7 @@ func TestCreateEventsWithDataGranularity(t *testing.T) { mockTaggingSvc := &MockResourceGroupsTaggingClient{} mockCloudwatchSvc := &MockCloudWatchClientWithDataGranularity{} + infoAPImap := make(map[string]string) listMetricWithStatsTotal := []metricsWithStatistics{ { listMetric1, @@ -1354,7 +1367,7 @@ func TestCreateEventsWithDataGranularity(t *testing.T) { var previousEndTime time.Time startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime) - events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime) + events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime) assert.NoError(t, err) expectedID := " " + regionName + accountID @@ -1380,6 +1393,7 @@ func TestCreateEventsWithTagsFilter(t *testing.T) { m.logger = logp.NewLogger("test") mockTaggingSvc := &MockResourceGroupsTaggingClient{} + infoAPImap := make(map[string]string) mockCloudwatchSvc := &MockCloudWatchClient{} listMetricWithStatsTotal := []metricsWithStatistics{ { @@ -1398,7 +1412,7 @@ func TestCreateEventsWithTagsFilter(t *testing.T) { var previousEndTime time.Time startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime) - events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime) + events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime) assert.NoError(t, err) 
assert.Equal(t, 1, len(events)) @@ -1410,7 +1424,7 @@ func TestCreateEventsWithTagsFilter(t *testing.T) { }, } - events, err = m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime) + events, err = m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime) assert.NoError(t, err) assert.Equal(t, 0, len(events)) } @@ -1560,12 +1574,13 @@ func TestCreateEventsTimestamp(t *testing.T) { } resourceTypeTagFilters := map[string][]aws.Tag{} + infoAPImap := make(map[string]string) var previousEndTime time.Time startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime) cloudwatchMock := &MockCloudWatchClientWithoutDim{} resGroupTaggingClientMock := &MockResourceGroupsTaggingClient{} - events, err := m.createEvents(cloudwatchMock, resGroupTaggingClientMock, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime) + events, err := m.createEvents(cloudwatchMock, resGroupTaggingClientMock, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime) assert.NoError(t, err) assert.Equal(t, timestamp, events[" "+regionName+accountID+namespace+"-0"].Timestamp) } diff --git a/x-pack/metricbeat/module/aws/utils.go b/x-pack/metricbeat/module/aws/utils.go index b5c1a9249137..b233786c4666 100644 --- a/x-pack/metricbeat/module/aws/utils.go +++ b/x-pack/metricbeat/module/aws/utils.go @@ -11,7 +11,11 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/apigateway" + "github.com/aws/aws-sdk-go-v2/service/apigatewayv2" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi" @@ -107,6 +111,52 @@ func 
GetListMetricsOutput(namespace string, regionName string, period time.Durat return metricWithAccountID, nil } +// GetAPIGatewayRestAPIOutput function gets results from apigw api. +// GetRestApis Apigateway API is used to retrieve only the REST API specified info. This returns a map with the names and ids of RestAPIs configured +// Limit variable defines maximum number of returned results per page. The default value is 25 and the maximum value is 500. +func GetAPIGatewayRestAPIOutput(svcRestApi *apigateway.Client, limit *int32) (map[string]string, error) { + input := &apigateway.GetRestApisInput{} + if limit != nil { + input = &apigateway.GetRestApisInput{ + Limit: limit, + } + } + ctx, cancel := getContextWithTimeout(DefaultApiTimeout) + defer cancel() + result, err := svcRestApi.GetRestApis(ctx, input) + if err != nil { + return nil, fmt.Errorf("error retrieving GetRestApis %w", err) + } + + // Iterate and display the APIs + infoRestAPImap := make(map[string]string, len(result.Items)) + for _, api := range result.Items { + infoRestAPImap[aws.ToString(api.Name)] = aws.ToString(api.Id) + } + return infoRestAPImap, nil +} + +// GetAPIGatewayAPIOutput function gets results from apigatewayv2 api. +// GetApis Apigateway API is used to retrieve the HTTP and WEBSOCKET specified info. 
This returns a map with the names and ids of relevant APIs configured +func GetAPIGatewayAPIOutput(svcHttpApi *apigatewayv2.Client) (map[string]string, error) { + input := &apigatewayv2.GetApisInput{} + + ctx, cancel := getContextWithTimeout(DefaultApiTimeout) + defer cancel() + result, err := svcHttpApi.GetApis(ctx, input) + + if err != nil { + return nil, fmt.Errorf("error retrieving GetApis %w", err) + } + + // Iterate and display the APIs + infoAPImap := make(map[string]string, len(result.Items)) + for _, api := range result.Items { + infoAPImap[aws.ToString(api.Name)] = aws.ToString(api.ApiId) + } + return infoAPImap, nil +} + // GetMetricDataResults function uses MetricDataQueries to get metric data output. func GetMetricDataResults(metricDataQueries []types.MetricDataQuery, svc cloudwatch.GetMetricDataAPIClient, startTime time.Time, endTime time.Time) ([]types.MetricDataResult, error) { maxNumberOfMetricsRetrieved := 500 From 6ae50388e8ffa5d74f7f2984ee36b3d93f768787 Mon Sep 17 00:00:00 2001 From: Olga Naydyonock Date: Tue, 29 Oct 2024 11:31:35 +0200 Subject: [PATCH 89/90] Beats macOS weekly pipeline (#41430) enabled weekly macos pipeline for main branch --- .buildkite/macos-tests-pipeline.yml | 571 +++++++++++++++++++++++++++- catalog-info.yaml | 11 +- 2 files changed, 565 insertions(+), 17 deletions(-) diff --git a/.buildkite/macos-tests-pipeline.yml b/.buildkite/macos-tests-pipeline.yml index 94f3f00248a4..f910f4f119a3 100644 --- a/.buildkite/macos-tests-pipeline.yml +++ b/.buildkite/macos-tests-pipeline.yml @@ -13,15 +13,564 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - - label: ":mac: Auditbeat: macOS x86_64 Unit Tests" - command: echo "test!" 
- retry: - automatic: - - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability - agents: - provider: "orka" - imagePrefix: "${IMAGE_MACOS_X86_64}" - artifact_paths: - - "auditbeat/build/*.xml" - - "auditbeat/build/*.json" + - group: "Auditbeat macOS Tests" + steps: + - label: ":mac: Auditbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd auditbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: Auditbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd auditbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "Filebeat macOS Tests" + steps: + - label: ":mac: Filebeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd filebeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + 
concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: Filebeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd filebeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "Heartbeat macOS Tests" + steps: + - label: ":mac: Heartbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd heartbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "metricbeat/build/*.xml" + - "metricbeat/build/*.json" + + - label: ":mac: Heartbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd heartbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + 
concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "metricbeat/build/*.xml" + - "metricbeat/build/*.json" + + - group: "Metricbeat macOS Tests" + steps: + - label: ":mac: Metricbeat: macOS x64_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd metricbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "metricbeat/build/*.xml" + - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: Metricbeat: macOS arm64 Unit Tests" + skip: "Skipping due to elastic/beats#33035" + # https://github.com/elastic/beats/issues/33035 + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd metricbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "metricbeat/build/*.xml" + - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "Packetbeat macOS Tests" + steps: + - label: ":mac: Packetbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source 
.buildkite/scripts/install_macos_tools.sh + cd packetbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "packetbeat/build/*.xml" + - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: Packetbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd packetbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "packetbeat/build/*.xml" + - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/auditbeat macOS Tests" + steps: + - label: ":mac: x-pack/auditbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/auditbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/auditbeat/build/*.xml" + - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: 
"x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/auditbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/auditbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/auditbeat/build/*.xml" + - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/filebeat macOS Tests" + steps: + - label: ":mac: x-pack/filebeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/filebeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/filebeat/build/*.xml" + - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/filebeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/filebeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm 
images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/filebeat/build/*.xml" + - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/heartbeat macOS Tests" + steps: + - label: ":mac: x-pack/heartbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + withNodeJSEnv $ASDF_NODEJS_VERSION + installNodeJsDependencies + cd x-pack/heartbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/heartbeat/build/*.xml" + - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/heartbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + withNodeJSEnv $ASDF_NODEJS_VERSION + installNodeJsDependencies + cd x-pack/heartbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/heartbeat/build/*.xml" + - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/metricbeat macOS 
Tests" + steps: + - label: ":mac: x-pack/metricbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/metricbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/metricbeat/build/*.xml" + - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/metricbeat: macOS arm64 Unit Tests" + skip: "Skipping due to elastic/beats#33036 & elastic/beats#40496" + # https://github.com/elastic/beats/issues/33036 https://github.com/elastic/beats/issues/40496 + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/metricbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/metricbeat/build/*.xml" + - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/osquerybeat macOS Tests" + steps: + - label: ":mac: x-pack/osquerybeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/osquerybeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: 
"${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/osquerybeat/build/*.xml" + - "x-pack/osquerybeat/build/*.json" + + - label: ":mac: x-pack/osquerybeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/osquerybeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/osquerybeat/build/*.xml" + - "x-pack/osquerybeat/build/*.json" + + - group: "x-pack/packetbeat macOS Tests" + steps: + - label: ":mac: x-pack/packetbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/packetbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/packetbeat/build/*.xml" + - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/packetbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/packetbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: 
"${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/packetbeat/build/*.xml" + - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true diff --git a/catalog-info.yaml b/catalog-info.yaml index d4a48a5a1aae..00a212b6295b 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -1175,6 +1175,11 @@ spec: cancel_intermediate_builds: false provider_settings: trigger_mode: none + schedules: + Weekly main: + branch: main + cronline: 0 0 * * 0 + message: Weekly build of macOS tests teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ @@ -1220,12 +1225,6 @@ spec: message: Daily trigger of Beats AWS tests env: PIPELINES_TO_TRIGGER: 'beats-aws-tests' - Weekly run of macOS tests: - branch: main - cronline: 0 0 * * 0 - message: Weekly trigger of Beats macOS tests - env: - PIPELINES_TO_TRIGGER: 'beats-macos-tests' skip_intermediate_builds: true provider_settings: trigger_mode: none From e20e486ad8b74653814c515691c4a5a981f5fd64 Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Tue, 29 Oct 2024 13:16:08 -0300 Subject: [PATCH 90/90] otelconsumer: adjustments to collector logic and retry/ack unit tests (#41457) The call to `ConsumeLogs` can fail in various ways. If the error is of type "Permanent", retrying will have no effect. For any other error, we need to retry the batch. Additionally, only acknowledge a batch when the collector confirms receipt of the data to avoid losing events. 
--- NOTICE.txt | 212 ++++++++++++++++++ go.sum | 2 + libbeat/outputs/otelconsumer/otelconsumer.go | 26 ++- .../outputs/otelconsumer/otelconsumer_test.go | 88 ++++++++ 4 files changed, 323 insertions(+), 5 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index ca8f0a435085..76189f17cce2 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -55740,6 +55740,218 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdat limitations under the License. +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/collector/pdata/testdata +Version: v0.109.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdata/testdata@v0.109.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc Version: v0.49.0 diff --git a/go.sum b/go.sum index e27981da519d..ba2722f5baab 100644 --- a/go.sum +++ b/go.sum @@ -953,6 +953,8 @@ go.opentelemetry.io/collector/pdata v1.15.0 h1:q/T1sFpRKJnjDrUsHdJ6mq4uSqViR/f92 go.opentelemetry.io/collector/pdata v1.15.0/go.mod h1:2wcsTIiLAJSbqBq/XUUYbi+cP+N87d0jEJzmb9nT19U= go.opentelemetry.io/collector/pdata/pprofile v0.109.0 h1:5lobQKeHk8p4WC7KYbzL6ZqqX3eSizsdmp5vM8pQFBs= go.opentelemetry.io/collector/pdata/pprofile v0.109.0/go.mod h1:lXIifCdtR5ewO17JAYTUsclMqRp6h6dCowoXHhGyw8Y= +go.opentelemetry.io/collector/pdata/testdata v0.109.0 h1:gvIqy6juvqFET/6zi+zUOH1KZY/vtEDZW55u7gJ/hEo= +go.opentelemetry.io/collector/pdata/testdata v0.109.0/go.mod h1:zRttU/F5QMQ6ZXBMXCoSVG3EORTZLTK+UUS0VoMoT44= go.opentelemetry.io/collector/receiver v0.109.0 h1:DTOM7xaDl7FUGQIjvjmWZn03JUE+aG4mJzWWfb7S8zw= go.opentelemetry.io/collector/receiver v0.109.0/go.mod h1:jeiCHaf3PE6aXoZfHF5Uexg7aztu+Vkn9LVw0YDKm6g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= diff --git a/libbeat/outputs/otelconsumer/otelconsumer.go b/libbeat/outputs/otelconsumer/otelconsumer.go index 6eee8b58e657..cad11ab14428 100644 --- 
a/libbeat/outputs/otelconsumer/otelconsumer.go +++ b/libbeat/outputs/otelconsumer/otelconsumer.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" ) @@ -63,7 +64,7 @@ func (out *otelConsumer) Close() error { return nil } -// Publish converts Beat events to Otel format and send to the next otel consumer +// Publish converts Beat events to Otel format and sends them to the Otel collector func (out *otelConsumer) Publish(ctx context.Context, batch publisher.Batch) error { switch { case out.logsConsumer != nil: @@ -73,8 +74,7 @@ func (out *otelConsumer) Publish(ctx context.Context, batch publisher.Batch) err } } -func (out *otelConsumer) logsPublish(_ context.Context, batch publisher.Batch) error { - defer batch.ACK() +func (out *otelConsumer) logsPublish(ctx context.Context, batch publisher.Batch) error { st := out.observer pLogs := plog.NewLogs() resourceLogs := pLogs.ResourceLogs().AppendEmpty() @@ -97,10 +97,26 @@ func (out *otelConsumer) logsPublish(_ context.Context, batch publisher.Batch) e pcommonEvent.CopyTo(logRecord.Body().SetEmptyMap()) } - if err := out.logsConsumer.ConsumeLogs(context.TODO(), pLogs); err != nil { - return fmt.Errorf("error otel log consumer: %w", err) + err := out.logsConsumer.ConsumeLogs(ctx, pLogs) + if err != nil { + // Permanent errors shouldn't be retried. This typically means + // the data cannot be serialized by the exporter that is attached + // to the pipeline or when the destination refuses the data because + // it cannot decode it. Retrying in this case is useless. 
+ // + // See https://github.com/open-telemetry/opentelemetry-collector/blob/1c47d89/receiver/doc.go#L23-L40 + if consumererror.IsPermanent(err) { + st.PermanentErrors(len(events)) + batch.Drop() + } else { + st.RetryableErrors(len(events)) + batch.Retry() + } + + return fmt.Errorf("failed to send batch events to otel collector: %w", err) } + batch.ACK() st.NewBatch(len(events)) st.AckedEvents(len(events)) return nil diff --git a/libbeat/outputs/otelconsumer/otelconsumer_test.go b/libbeat/outputs/otelconsumer/otelconsumer_test.go index 1a8c34e21a08..a18bf77e6b89 100644 --- a/libbeat/outputs/otelconsumer/otelconsumer_test.go +++ b/libbeat/outputs/otelconsumer/otelconsumer_test.go @@ -18,15 +18,103 @@ package otelconsumer import ( + "context" + "errors" "testing" "time" "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/outputs" + "github.com/elastic/beats/v7/libbeat/outputs/outest" "github.com/elastic/elastic-agent-libs/mapstr" ) +func TestPublish(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + event1 := beat.Event{Fields: mapstr.M{"field": 1}} + event2 := beat.Event{Fields: mapstr.M{"field": 2}} + event3 := beat.Event{Fields: mapstr.M{"field": 3}} + + makeOtelConsumer := func(t *testing.T, consumeFn func(ctx context.Context, ld plog.Logs) error) *otelConsumer { + t.Helper() + + logConsumer, err := consumer.NewLogs(consumeFn) + assert.NoError(t, err) + consumer := &otelConsumer{ + observer: outputs.NewNilObserver(), + logsConsumer: logConsumer, + beatInfo: beat.Info{}, + } + return consumer + } + + t.Run("ack batch on consumer success", func(t *testing.T) { + batch := outest.NewBatch(event1, event2, event3) + + var countLogs int + otelConsumer := 
makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error { + countLogs = countLogs + ld.LogRecordCount() + return nil + }) + + err := otelConsumer.Publish(ctx, batch) + assert.NoError(t, err) + assert.Len(t, batch.Signals, 1) + assert.Equal(t, outest.BatchACK, batch.Signals[0].Tag) + assert.Equal(t, len(batch.Events()), countLogs, "all events should be consumed") + }) + + t.Run("retries the batch on non-permanent consumer error", func(t *testing.T) { + batch := outest.NewBatch(event1, event2, event3) + + otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error { + return errors.New("consume error") + }) + + err := otelConsumer.Publish(ctx, batch) + assert.Error(t, err) + assert.False(t, consumererror.IsPermanent(err)) + assert.Len(t, batch.Signals, 1) + assert.Equal(t, outest.BatchRetry, batch.Signals[0].Tag) + }) + + t.Run("drop batch on permanent consumer error", func(t *testing.T) { + batch := outest.NewBatch(event1, event2, event3) + + otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error { + return consumererror.NewPermanent(errors.New("consumer error")) + }) + + err := otelConsumer.Publish(ctx, batch) + assert.Error(t, err) + assert.True(t, consumererror.IsPermanent(err)) + assert.Len(t, batch.Signals, 1) + assert.Equal(t, outest.BatchDrop, batch.Signals[0].Tag) + }) + + t.Run("retries on context cancelled", func(t *testing.T) { + batch := outest.NewBatch(event1, event2, event3) + + otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error { + return context.Canceled + }) + + err := otelConsumer.Publish(ctx, batch) + assert.Error(t, err) + assert.ErrorIs(t, err, context.Canceled) + assert.Len(t, batch.Signals, 1) + assert.Equal(t, outest.BatchRetry, batch.Signals[0].Tag) + }) +} + func TestMapstrToPcommonMapString(t *testing.T) { tests := map[string]struct { mapstr_val interface{}