diff --git a/changelog/fragments/1732656422-add-actionable-error-message-for-enroll-command.yaml b/changelog/fragments/1732656422-add-actionable-error-message-for-enroll-command.yaml new file mode 100644 index 00000000000..5bc0e037318 --- /dev/null +++ b/changelog/fragments/1732656422-add-actionable-error-message-for-enroll-command.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: enhancement + +# Change summary; a 80ish characters long description of the change. +summary: Elastic agent returns an actionable error message when the user trying to execute the enroll command is not the same as the owner of the elastic-agent program files + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: elastic-agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. 
+# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/6144 +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/4889 diff --git a/changelog/fragments/1734512365-embed-hints-inputs-in-agent-container-image.yaml b/changelog/fragments/1734512365-embed-hints-inputs-in-agent-container-image.yaml new file mode 100644 index 00000000000..c005769ca3a --- /dev/null +++ b/changelog/fragments/1734512365-embed-hints-inputs-in-agent-container-image.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: enhancement + +# Change summary; a 80ish characters long description of the change. +summary: Embed hints-based inputs in the Elastic Agent container image. + +# Long description; in case the summary is not enough to describe the change +# this field accommodates a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +description: This change includes the addition of hints-based inputs directly within the Elastic Agent container image, enabling streamlined configurations for input discovery when deployed in containerized environments. 
+ +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: elastic-agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/6381 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: https://github.com/elastic/elastic-agent/issues/5661 diff --git a/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml index 7180cf79a49..e9b272adf20 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml @@ -1142,8 +1142,6 @@ spec: - mountPath: /hostfs/var/lib name: var-lib readOnly: true - - mountPath: /usr/share/elastic-agent/state/inputs.d - name: external-inputs - mountPath: /usr/share/elastic-agent/state name: agent-data - mountPath: /etc/elastic-agent/agent.yml @@ -1152,27 +1150,6 @@ spec: subPath: agent.yml dnsPolicy: ClusterFirstWithHostNet hostNetwork: true - initContainers: - - args: - - -c - - mkdir -p /etc/elastic-agent/inputs.d && mkdir -p /etc/elastic-agent/inputs.d - && wget -O - https://github.com/elastic/elastic-agent/archive/v9.0.0.tar.gz - | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.0.0/deploy/kubernetes/elastic-agent-standalone/templates.d" - command: - - sh - image: busybox:1.36.1 - name: 
k8s-templates-downloader - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - runAsGroup: 1000 - runAsUser: 1000 - volumeMounts: - - mountPath: /etc/elastic-agent/inputs.d - name: external-inputs nodeSelector: kubernetes.io/os: linux serviceAccountName: agent-pernode-example @@ -1195,8 +1172,6 @@ spec: - hostPath: path: /var/lib name: var-lib - - emptyDir: {} - name: external-inputs - hostPath: path: /etc/elastic-agent/default/agent-pernode-example/state type: DirectoryOrCreate diff --git a/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml index e428239a0b1..5529f8f49db 100644 --- a/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml @@ -1158,8 +1158,6 @@ spec: - mountPath: /hostfs/var/lib name: var-lib readOnly: true - - mountPath: /usr/share/elastic-agent/state/inputs.d - name: external-inputs - mountPath: /usr/share/elastic-agent/state name: agent-data - mountPath: /etc/elastic-agent/agent.yml @@ -1168,27 +1166,6 @@ spec: subPath: agent.yml dnsPolicy: ClusterFirstWithHostNet hostNetwork: true - initContainers: - - args: - - -c - - mkdir -p /etc/elastic-agent/inputs.d && mkdir -p /etc/elastic-agent/inputs.d - && wget -O - https://github.com/elastic/elastic-agent/archive/v9.0.0.tar.gz - | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.0.0/deploy/kubernetes/elastic-agent-standalone/templates.d" - command: - - sh - image: busybox:1.36.1 - name: k8s-templates-downloader - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - runAsGroup: 1000 - runAsUser: 1000 - volumeMounts: - - mountPath: /etc/elastic-agent/inputs.d - name: external-inputs nodeSelector: kubernetes.io/os: linux serviceAccountName: agent-pernode-example @@ 
-1211,8 +1188,6 @@ spec: - hostPath: path: /var/lib name: var-lib - - emptyDir: {} - name: external-inputs - hostPath: path: /etc/elastic-agent/default/agent-pernode-example/state type: DirectoryOrCreate diff --git a/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_kubernetes.tpl b/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_kubernetes.tpl index f8cc9a1c5fe..b8af13d2116 100644 --- a/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_kubernetes.tpl +++ b/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_kubernetes.tpl @@ -17,7 +17,6 @@ {{- include "elasticagent.kubernetes.config.state.statefulsets.init" $ -}} {{- include "elasticagent.kubernetes.config.state.storageclasses.init" $ -}} {{- include "elasticagent.kubernetes.config.kube_controller.init" $ -}} -{{- include "elasticagent.kubernetes.config.hints.init" $ -}} {{- include "elasticagent.kubernetes.config.audit_logs.init" $ -}} {{- include "elasticagent.kubernetes.config.container_logs.init" $ -}} {{- include "elasticagent.kubernetes.config.kubelet.containers.init" $ -}} @@ -28,4 +27,4 @@ {{- include "elasticagent.kubernetes.config.kube_proxy.init" $ -}} {{- include "elasticagent.kubernetes.config.kube_scheduler.init" $ -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_kubernetes_hints.tpl b/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_kubernetes_hints.tpl index 45c75220398..4388990db4d 100644 --- a/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_kubernetes_hints.tpl +++ b/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_kubernetes_hints.tpl @@ -1,6 +1,2 @@ {{- define "elasticagent.kubernetes.config.hints.init" -}} -{{- if eq $.Values.kubernetes.hints.enabled true -}} -{{- $preset := $.Values.agent.presets.perNode -}} -{{- include "elasticagent.preset.applyOnce" (list $ $preset "elasticagent.kubernetes.pernode.preset") 
-}} -{{- end -}} {{- end -}} diff --git a/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_preset_pernode.tpl b/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_preset_pernode.tpl index fc06b01084b..3feb3f9f60d 100644 --- a/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_preset_pernode.tpl +++ b/deploy/helm/elastic-agent/templates/integrations/_kubernetes/_preset_pernode.tpl @@ -3,7 +3,6 @@ {{- include "elasticagent.preset.mutate.volumes" (list $ $.Values.agent.presets.perNode "elasticagent.kubernetes.pernode.preset.volumes") -}} {{- include "elasticagent.preset.mutate.outputs.byname" (list $ $.Values.agent.presets.perNode $.Values.kubernetes.output)}} {{- if and (eq $.Values.kubernetes.hints.enabled true) (eq $.Values.agent.fleet.enabled false) -}} -{{- include "elasticagent.preset.mutate.initcontainers" (list $ $.Values.agent.presets.perNode "elasticagent.kubernetes.pernode.preset.initcontainers") -}} {{- include "elasticagent.preset.mutate.providers.kubernetes.hints" (list $ $.Values.agent.presets.perNode "elasticagent.kubernetes.pernode.preset.providers.kubernetes.hints") -}} {{- end -}} {{- if or (eq $.Values.kubernetes.scheduler.enabled true) (eq $.Values.kubernetes.controller_manager.enabled true) -}} @@ -37,10 +36,6 @@ extraVolumeMounts: - name: var-lib mountPath: /hostfs/var/lib readOnly: true -{{- if eq $.Values.kubernetes.hints.enabled true }} -- name: external-inputs - mountPath: /usr/share/elastic-agent/state/inputs.d -{{- end }} {{- end -}} {{- define "elasticagent.kubernetes.pernode.preset.volumes" -}} @@ -63,34 +58,6 @@ extraVolumes: - name: var-lib hostPath: path: /var/lib -{{- if eq $.Values.kubernetes.hints.enabled true }} -- name: external-inputs - emptyDir: {} -{{- end }} -{{- end -}} - -{{- define "elasticagent.kubernetes.pernode.preset.initcontainers" -}} -initContainers: -- name: k8s-templates-downloader - image: busybox:1.36.1 - securityContext: - allowPrivilegeEscalation: false - privileged: false - 
runAsUser: 1000 - runAsGroup: 1000 - capabilities: - drop: - - ALL - command: [ 'sh' ] - args: - - -c - - >- - mkdir -p /etc/elastic-agent/inputs.d && - mkdir -p /etc/elastic-agent/inputs.d && - wget -O - https://github.com/elastic/elastic-agent/archive/v{{$.Values.agent.version}}.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-{{$.Values.agent.version}}/deploy/kubernetes/elastic-agent-standalone/templates.d" - volumeMounts: - - name: external-inputs - mountPath: /etc/elastic-agent/inputs.d {{- end -}} {{- define "elasticagent.kubernetes.pernode.preset.providers.kubernetes.hints" -}} diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml index 95d9566cf31..742df26feda 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -28,6 +32,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -40,6 +46,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -62,7 +70,6 @@ inputs: path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false period: ${kubernetes.hints.activemq.broker.period|kubernetes.hints.activemq.period|'10s'} tags: - - forwarded - activemq-broker username: ${kubernetes.hints.activemq.broker.username|kubernetes.hints.activemq.username|'admin'} - condition: ${kubernetes.hints.activemq.queue.enabled} == true or ${kubernetes.hints.activemq.enabled} == true @@ -77,7 +84,6 @@ 
inputs: path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false period: ${kubernetes.hints.activemq.queue.period|kubernetes.hints.activemq.period|'10s'} tags: - - forwarded - activemq-queue username: ${kubernetes.hints.activemq.queue.username|kubernetes.hints.activemq.username|'admin'} - condition: ${kubernetes.hints.activemq.topic.enabled} == true or ${kubernetes.hints.activemq.enabled} == true @@ -92,7 +98,6 @@ inputs: path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false period: ${kubernetes.hints.activemq.topic.period|kubernetes.hints.activemq.period|'10s'} tags: - - forwarded - activemq-topic username: ${kubernetes.hints.activemq.topic.username|kubernetes.hints.activemq.username|'admin'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml index e1586d3c5ea..3520dca77fc 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml @@ -1,46 +1,4 @@ inputs: - - name: filestream-apache - id: filestream-apache-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.apache.access.enabled} == true or ${kubernetes.hints.apache.enabled} == true - data_stream: - dataset: apache.access - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.apache.access.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - apache-access - - condition: ${kubernetes.hints.apache.error.enabled} == true or ${kubernetes.hints.apache.enabled} == true - data_stream: - dataset: apache.error - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.apache.error.stream|'all'} - paths: - - 
/var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - apache-error - data_stream.namespace: default - name: httpjson-apache id: httpjson-apache-${kubernetes.hints.container_id} type: httpjson @@ -139,3 +97,53 @@ inputs: period: ${kubernetes.hints.apache.status.period|kubernetes.hints.apache.period|'30s'} server_status_path: /server-status data_stream.namespace: default + - name: filestream-apache + id: filestream-apache-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.apache.access.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.access + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - apache-access + - condition: ${kubernetes.hints.apache.error.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.error + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - apache-error + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml index 1d6e88f57a6..5d15a8122ea 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml @@ 
-10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -22,6 +24,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml index 659dd1ec979..e4c87ed361e 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml @@ -1,25 +1,4 @@ inputs: - - name: udp-cef - id: udp-cef-${kubernetes.hints.container_id} - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true - data_stream: - dataset: cef.log - type: logs - host: localhost:9003 - processors: - - rename: - fields: - - from: message - to: event.original - - decode_cef: - field: event.original - tags: - - cef - - forwarded - data_stream.namespace: default - name: tcp-cef id: tcp-cef-${kubernetes.hints.container_id} type: tcp @@ -52,6 +31,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -67,8 +48,31 @@ inputs: field: event.original prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - cef - forwarded data_stream.namespace: default + - name: udp-cef + id: udp-cef-${kubernetes.hints.container_id} + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true + data_stream: + dataset: cef.log + type: logs + host: localhost:9003 + processors: + - rename: + fields: + - from: message + to: event.original + - decode_cef: + field: event.original + tags: + - cef + - forwarded + data_stream.namespace: default diff --git 
a/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml index 97bdf20b5a4..155b98f8699 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -30,6 +32,8 @@ inputs: target: _temp_ prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml index 760582f2305..1f1319c5ce7 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline.match: after multiline.max_lines: 5000 multiline.negate: true @@ -23,6 +25,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -33,6 +37,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -43,6 +49,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml index 80cdc1bb0fa..a9adaaf36a1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml @@ -44,6 +44,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - 
container: format: auto @@ -54,6 +56,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml index a5d43104711..bc00a23cc47 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -43,6 +45,8 @@ inputs: ignore_missing: true prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.elasticsearch.deprecation.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true data_stream: @@ -52,6 +56,8 @@ inputs: - .gz$ - _slowlog.log$ - _access.log$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -60,6 +66,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.elasticsearch.gc.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true data_stream: @@ -72,6 +80,8 @@ inputs: - '^CommandLine flags: ' - '^Memory: ' - ^{ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -89,6 +99,8 @@ inputs: target: "" prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.elasticsearch.server.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true data_stream: @@ -99,6 +111,8 @@ inputs: - _slowlog.log$ - _access.log$ - _deprecation.log$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -107,6 +121,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true 
symlinks: true - condition: ${kubernetes.hints.elasticsearch.slowlog.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true data_stream: @@ -114,6 +130,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -122,6 +140,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true data_stream.namespace: default - name: elasticsearch/metrics-elasticsearch diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml index 44bd23b95d6..5ce213a2ff1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -20,6 +22,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - fireeye-nx diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml index 2a69222df44..be3b22b57ac 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -20,6 +22,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - haproxy-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml index b98a748f878..cc2249d1b6e 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - hashicorp-vault-audit @@ -27,6 +31,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -35,6 +41,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - hashicorp-vault-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml index 8f35f1980e2..7708e49d3e5 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml @@ -1,4 +1,31 @@ inputs: + - name: iis/metrics-iis + id: iis/metrics-iis-${kubernetes.hints.container_id} + type: iis/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.iis.application_pool.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.application_pool + type: metrics + metricsets: + - application_pool + period: ${kubernetes.hints.iis.application_pool.period|kubernetes.hints.iis.period|'10s'} + - condition: ${kubernetes.hints.iis.webserver.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.webserver + type: metrics + metricsets: + - webserver + period: ${kubernetes.hints.iis.webserver.period|kubernetes.hints.iis.period|'10s'} + - condition: ${kubernetes.hints.iis.website.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.website + type: metrics + metricsets: + - website + period: 
${kubernetes.hints.iis.website.period|kubernetes.hints.iis.period|'10s'} + data_stream.namespace: default - name: filestream-iis id: filestream-iis-${kubernetes.hints.container_id} type: filestream @@ -12,6 +39,8 @@ inputs: - .gz$ exclude_lines: - ^# + file_identity: + fingerprint: null ignore_older: 72h parsers: - container: @@ -21,6 +50,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - iis-access @@ -32,6 +63,8 @@ inputs: - .gz$ exclude_lines: - ^# + file_identity: + fingerprint: null ignore_older: 72h parsers: - container: @@ -41,34 +74,9 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - iis-error data_stream.namespace: default - - name: iis/metrics-iis - id: iis/metrics-iis-${kubernetes.hints.container_id} - type: iis/metrics - use_output: default - streams: - - condition: ${kubernetes.hints.iis.application_pool.enabled} == true or ${kubernetes.hints.iis.enabled} == true - data_stream: - dataset: iis.application_pool - type: metrics - metricsets: - - application_pool - period: ${kubernetes.hints.iis.application_pool.period|kubernetes.hints.iis.period|'10s'} - - condition: ${kubernetes.hints.iis.webserver.enabled} == true or ${kubernetes.hints.iis.enabled} == true - data_stream: - dataset: iis.webserver - type: metrics - metricsets: - - webserver - period: ${kubernetes.hints.iis.webserver.period|kubernetes.hints.iis.period|'10s'} - - condition: ${kubernetes.hints.iis.website.enabled} == true or ${kubernetes.hints.iis.enabled} == true - data_stream: - dataset: iis.website - type: metrics - metricsets: - - website - period: ${kubernetes.hints.iis.website.period|kubernetes.hints.iis.period|'10s'} - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml index 413683e2d18..98c63da565a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml @@ -14,6 +14,8 @@ inputs: _conf: tz_offset: local fields_under_root: true + file_identity: + fingerprint: null parsers: - container: format: auto @@ -24,6 +26,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml index 9ccbf653368..4455b0bcb22 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml @@ -1,4 +1,20 @@ inputs: + - name: udp-iptables + id: udp-iptables-${kubernetes.hints.container_id} + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - iptables-log + - forwarded + data_stream.namespace: default - name: filestream-iptables id: filestream-iptables-${kubernetes.hints.container_id} type: filestream @@ -10,6 +26,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -20,6 +38,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - iptables-log @@ -41,19 +61,3 @@ inputs: tags: - iptables-log data_stream.namespace: default - - name: udp-iptables - id: udp-iptables-${kubernetes.hints.container_id} - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true 
- data_stream: - dataset: iptables.log - type: logs - host: localhost:9001 - processors: - - add_locale: null - tags: - - iptables-log - - forwarded - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml index a167b6e182f..3532eba99f9 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml @@ -1,4 +1,37 @@ inputs: + - name: filestream-kafka + id: filestream-kafka-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.log + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + multiline: + match: after + negate: true + pattern: ^\[ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kafka.log.stream|'all'} + paths: + - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - kafka-log + data_stream.namespace: default - name: kafka/metrics-kafka id: kafka/metrics-kafka-${kubernetes.hints.container_id} type: kafka/metrics @@ -36,32 +69,3 @@ inputs: period: ${kubernetes.hints.kafka.partition.period|kubernetes.hints.kafka.period|'10s'} username: ${kubernetes.hints.kafka.partition.username|kubernetes.hints.kafka.username|''} data_stream.namespace: default - - name: filestream-kafka - id: filestream-kafka-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true - data_stream: - dataset: kafka.log - type: logs - exclude_files: - - .gz$ - multiline: - match: after - negate: true - pattern: ^\[ - 
parsers: - - container: - format: auto - stream: ${kubernetes.hints.kafka.log.stream|'all'} - paths: - - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - kafka-log - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml index 499a6e9d659..bf5e5e33465 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.kibana.log.enabled} == true or ${kubernetes.hints.kibana.enabled} == true data_stream: @@ -25,6 +29,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -33,6 +39,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true data_stream.namespace: default - name: http/metrics-kibana @@ -61,6 +69,27 @@ inputs: to: kibana.background_task_utilization ignore_missing: true username: ${kubernetes.hints.kibana.background_task_utilization.username|kubernetes.hints.kibana.username|''} + - condition: ${kubernetes.hints.kibana.task_manager_metrics.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.task_manager_metrics + type: metrics + hosts: + - ${kubernetes.hints.kibana.task_manager_metrics.host|kubernetes.hints.kibana.host|'http://localhost:5601'} + method: GET + metricsets: + - json + namespace: task_manager_metrics + password: 
${kubernetes.hints.kibana.task_manager_metrics.password|kubernetes.hints.kibana.password|''} + path: /api/task_manager/metrics + period: ${kubernetes.hints.kibana.task_manager_metrics.period|kubernetes.hints.kibana.period|'10s'} + processors: + - rename: + fail_on_error: false + fields: + - from: http.task_manager_metrics + to: kibana.task_manager_metrics + ignore_missing: true + username: ${kubernetes.hints.kibana.task_manager_metrics.username|kubernetes.hints.kibana.username|''} data_stream.namespace: default - name: kibana/metrics-kibana id: kibana/metrics-kibana-${kubernetes.hints.container_id} diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml index 89c3aed53ca..7b889c42cf4 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml @@ -1,32 +1,4 @@ inputs: - - name: logstash/metrics-logstash - id: logstash/metrics-logstash-${kubernetes.hints.container_id} - type: logstash/metrics - use_output: default - streams: - - condition: ${kubernetes.hints.logstash.node.enabled} == true or ${kubernetes.hints.logstash.enabled} == true - data_stream: - dataset: logstash.stack_monitoring.node - type: metrics - hosts: - - ${kubernetes.hints.logstash.node.host|kubernetes.hints.logstash.host|'http://localhost:9600'} - metricsets: - - node - password: ${kubernetes.hints.logstash.node.password|kubernetes.hints.logstash.password|''} - period: ${kubernetes.hints.logstash.node.period|kubernetes.hints.logstash.period|'10s'} - username: ${kubernetes.hints.logstash.node.username|kubernetes.hints.logstash.username|''} - - condition: ${kubernetes.hints.logstash.node_stats.enabled} == true or ${kubernetes.hints.logstash.enabled} == true - data_stream: - dataset: logstash.stack_monitoring.node_stats - type: metrics - hosts: - - 
${kubernetes.hints.logstash.node_stats.host|kubernetes.hints.logstash.host|'http://localhost:9600'} - metricsets: - - node_stats - password: ${kubernetes.hints.logstash.node_stats.password|kubernetes.hints.logstash.password|''} - period: ${kubernetes.hints.logstash.node_stats.period|kubernetes.hints.logstash.period|'10s'} - username: ${kubernetes.hints.logstash.node_stats.username|kubernetes.hints.logstash.username|''} - data_stream.namespace: default - name: filestream-logstash id: filestream-logstash-${kubernetes.hints.container_id} type: filestream @@ -38,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -56,6 +30,8 @@ inputs: target: "" prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.logstash.slowlog.enabled} == true or ${kubernetes.hints.logstash.enabled} == true data_stream: @@ -63,6 +39,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -77,5 +55,329 @@ inputs: target: "" prospector: scanner: + fingerprint: + enabled: true symlinks: true data_stream.namespace: default + - name: logstash/metrics-logstash + id: logstash/metrics-logstash-${kubernetes.hints.container_id} + type: logstash/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.logstash.node.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.stack_monitoring.node + type: metrics + hosts: + - ${kubernetes.hints.logstash.node.host|kubernetes.hints.logstash.host|'http://localhost:9600'} + metricsets: + - node + password: ${kubernetes.hints.logstash.node.password|kubernetes.hints.logstash.password|''} + period: ${kubernetes.hints.logstash.node.period|kubernetes.hints.logstash.period|'10s'} + username: ${kubernetes.hints.logstash.node.username|kubernetes.hints.logstash.username|''} + - condition: 
${kubernetes.hints.logstash.node_stats.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.stack_monitoring.node_stats + type: metrics + hosts: + - ${kubernetes.hints.logstash.node_stats.host|kubernetes.hints.logstash.host|'http://localhost:9600'} + metricsets: + - node_stats + password: ${kubernetes.hints.logstash.node_stats.password|kubernetes.hints.logstash.password|''} + period: ${kubernetes.hints.logstash.node_stats.period|kubernetes.hints.logstash.period|'10s'} + username: ${kubernetes.hints.logstash.node_stats.username|kubernetes.hints.logstash.username|''} + data_stream.namespace: default + - name: cel-logstash + id: cel-logstash-${kubernetes.hints.container_id} + type: cel + use_output: default + streams: + - auth.basic.password: null + auth.basic.user: null + condition: ${kubernetes.hints.logstash.node_cel.enabled} == true and ${kubernetes.hints.logstash.enabled} == true + config_version: "2" + data_stream: + dataset: logstash.node + type: metrics + interval: ${kubernetes.hints.logstash.node_cel.period|kubernetes.hints.logstash.period|'30s'} + program: "get(state.url)\n.as(resp, bytes(resp.Body)\n.decode_json().as(body,\n {\n \"logstash\":{\n \"elasticsearch\": has(body.pipelines) \n ? {\n \"cluster\":{\n \"id\":body.pipelines.map(pipeline_name, pipeline_name != \".monitoring-logstash\", has(body.pipelines[pipeline_name].vertices)\n ? body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.cluster_uuid), vertex.cluster_uuid) \n : []).flatten(),\n }\n }\n : {},\n \"node\":{\n \"stats\":{\n \"events\":body.events,\n \"jvm\":{\n \"uptime_in_millis\":body.jvm.uptime_in_millis,\n \"mem\":[body.jvm['mem']].drop(\"pools\")[0],\n \"threads\":body.jvm.threads\n },\n \"queue\":body.queue,\n \"reloads\":body.reloads,\n \"process\":body.process,\n \"os\":{\n \"cpu\":body.process.cpu,\n \"cgroup\":has(body.os.group) ? 
body.os.cgroup : {},\n },\n \"logstash\":{\n \"ephemeral_id\":body.ephemeral_id,\n \"host\":body.host,\n \"http_address\":body.http_address,\n \"name\":body.name,\n \"pipeline\":body.pipeline,\n \"pipelines\":body.pipelines.map(pipeline, pipeline != '.monitoring-logstash', [pipeline]).flatten(),\n \"snapshot\":body.snapshot,\n \"status\":body.status,\n \"uuid\":body.id,\n \"version\":body.version,\n }\n }}\n }})\n)\n.as(eve, {\n \"events\":[eve]\n})" + redact: + fields: null + resource.url: http://localhost:9600/_node/stats?graph=true&vertices=true + - auth.basic.password: null + auth.basic.user: null + condition: ${kubernetes.hints.logstash.pipeline.enabled} == true and ${kubernetes.hints.logstash.enabled} == true + config_version: "2" + data_stream: + dataset: logstash.pipeline + type: metrics + interval: ${kubernetes.hints.logstash.pipeline.period|kubernetes.hints.logstash.period|'30s'} + program: | + get(state.url).as(resp, bytes(resp.Body).decode_json().as(body, + body.pipelines.map(pipeline_name, pipeline_name != ".monitoring-logstash", { + "name": pipeline_name, + "elasticsearch.cluster.id": has(body.pipelines[pipeline_name].vertices) ? + body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.cluster_uuid), vertex.cluster_uuid) + : + [], + "host":{ + "name":body.name, + "address":body.http_address, + }, + "total":{ + "flow":body.pipelines[pipeline_name].flow, + "time":{ + "queue_push_duration": { + "ms": has(body.pipelines[pipeline_name].events.queue_push_duration_in_millis) ? + body.pipelines[pipeline_name].events.queue_push_duration_in_millis + : + [], + }, + "duration":{ + "ms": has(body.pipelines[pipeline_name].events.duration_in_millis) ? + body.pipelines[pipeline_name].events.duration_in_millis + : + [], + }, + }, + "reloads":{ + "successes":body.pipelines[pipeline_name].reloads.successes, + "failures":body.pipelines[pipeline_name].reloads.failures + }, + "events":{ + "out": has(body.pipelines[pipeline_name].events.out) ? 
+ body.pipelines[pipeline_name].events.out + : + [], + "in": has(body.pipelines[pipeline_name].events.out) ? // This deliberately uses 'out' as `has` does not accept `in` + body.pipelines[pipeline_name].events['in'] + : + [], + "filtered": has(body.pipelines[pipeline_name].events.filtered) ? + body.pipelines[pipeline_name].events.filtered + : + [], + }, + "queues":{ + "type": has(body.pipelines[pipeline_name].queue.type) ? + body.pipelines[pipeline_name].queue.type + : + [], + "events": has(body.pipelines[pipeline_name].queue.events_count) ? + body.pipelines[pipeline_name].queue.events_count + : + [], + "current_size":{ + "bytes": has(body.pipelines[pipeline_name].queue.queue_size_in_bytes) ? + body.pipelines[pipeline_name].queue.queue_size_in_bytes + : + [], + }, + "max_size":{ + "bytes": has(body.pipelines[pipeline_name].queue.max_queue_size_in_bytes) ? + body.pipelines[pipeline_name].queue.max_queue_size_in_bytes + : + [], + } + } + } + }))).as(pipelines, { + "events": pipelines.map(pipeline, { + "logstash": {"pipeline":pipeline} + }) + }) + redact: + fields: null + resource.url: http://localhost:9600/_node/stats?graph=true&vertices=true + - auth.basic.password: null + auth.basic.user: null + condition: ${kubernetes.hints.logstash.plugins.enabled} == true and ${kubernetes.hints.logstash.enabled} == true + config_version: "2" + data_stream: + dataset: logstash.plugins + type: metrics + interval: ${kubernetes.hints.logstash.plugins.period|kubernetes.hints.logstash.period|'1m'} + program: | + get(state.url + "/stats?graph=true&vertices=true").as(resp, bytes(resp.Body).decode_json().as(body, + body.pipelines.map(pipeline_name, pipeline_name != ".monitoring-logstash", body.pipelines[pipeline_name].with({ + "name":pipeline_name, + "pipeline_source_map": + get(state.url + "/pipelines/" + pipeline_name + "?graph=true&vertices=true").as(resp, + bytes(resp.Body).decode_json().as(pipes, + has(pipes.pipeline) ? 
+ pipes.pipelines.map(pipeline_name, + has(pipes.pipelines) && has(pipes.pipelines[pipeline_name].graph) && pipes.pipelines != null && pipes.pipelines[pipeline_name].graph.graph.vertices != null, + pipes.pipelines[pipeline_name].graph.graph.vertices.map(vertex, vertex.type == "plugin", { + "plugin_id": vertex.id, + "source": vertex.meta.source, + }) + ).drop("graph").flatten() + : + [] + ) + ), + "es_cluster_id": has(body.pipelines[pipeline_name].vertices) ? + body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.cluster_uuid), vertex.cluster_uuid) + : + [], + "es_cluster_id_map": has(body.pipelines[pipeline_name].vertices) ? + body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.cluster_uuid), { + "plugin_id": vertex.id, + "cluster_id": vertex.cluster_uuid, + }) + : + [], + "counter_map": has(body.pipelines[pipeline_name].vertices) ? + body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.long_counters), vertex.long_counters.map(counter, { + "plugin_id": vertex.id, + "name": counter.name, + "value": counter.value + })) + : + [], + "outputs": body.pipelines[pipeline_name].plugins.outputs, + "inputs": body.pipelines[pipeline_name].plugins.inputs, + "filters": body.pipelines[pipeline_name].plugins.filters, + "codecs": body.pipelines[pipeline_name].plugins.codecs, + "host":{ + "name": body.name, + "address": body.http_address, + } + })))).as(events, events.map(event, { + "inputs": event.inputs.map(input, has(event.hash), { + "name": event.name, + "id": event.hash, + "host": event.host, + "elasticsearch.cluster.id": event.es_cluster_id, + "plugin": { + "type": "input", + "input": { + "source":event.pipeline_source_map.map(tuple, (tuple.plugin_id == input.id), tuple.source).flatten().as(source, (source.size() != 0) ? 
source[0] : ""), + "elasticsearch.cluster.id": event.es_cluster_id_map.map(tuple, tuple.plugin_id == input.id, tuple.cluster_id), + "metrics": { + input.name: event.counter_map.flatten().filter(tuple, tuple.plugin_id == input.id).as(counter_map, zip( + counter_map.map(tuple, tuple.name), + counter_map.map(tuple, tuple.value) + )) + }, + "name": input.name, + "id": input.id, + "flow": has(input.flow) ? + input.flow + : + {}, + "events": { + "out": input.events.out, + }, + "time": { + "queue_push_duration": { + "ms": input.events.queue_push_duration_in_millis + } + } + } + } + }.drop_empty()), + "codecs": event.codecs.map(codec, has(event.hash), { + "name": event.name, + "id": event.hash, + "host": event.host, + "elasticsearch.cluster.id": event.es_cluster_id, + "plugin": { + "type": "codec", + "codec": { + "id":codec.id, + "name":codec.name, + "flow": has(codec.flow) ? codec.flow : {}, + "decode":{ + "duration":{ + "ms":codec.decode.duration_in_millis + }, + "in":codec.decode.writes_in, + "out":codec.decode.out, + }, + "encode":{ + "in":codec.encode.writes_in, + "duration":{ + "ms":codec.encode.duration_in_millis + } + } + } + } + }.drop_empty()), + "filters": event.filters.map(filter, has(event.hash), { + "name": event.name, + "id": event.hash, + "host": event.host, + "elasticsearch.cluster.id": event.es_cluster_id, + "plugin": { + "type": "filter", + "filter": { + "source":event.pipeline_source_map.map(tuple, (tuple.plugin_id == filter.id), tuple.source).flatten().as(source, (source.size() != 0) ? source[0] : ""), + "id": filter.id, + "name": filter.name, + "elasticsearch.cluster.id": event.es_cluster_id_map.map(tuple, tuple.plugin_id == filter.id, tuple.cluster_id), + "metrics": { + filter.name: event.counter_map.flatten().filter(tuple, tuple.plugin_id == filter.id).as(counter_map, zip( + counter_map.map(tuple, tuple.name), + counter_map.map(tuple, tuple.value) + )) + }, + "flow": has(filter.flow) ? 
+ filter.flow + : + {}, + "events": { + "in": filter.events['in'], + "out": filter.events.out, + }, + "time": { + "duration": { + "ms": filter.events.duration_in_millis + } + } + } + } + }.drop_empty()), + "outputs": event.outputs.map(output, has(event.hash), { + "name": event.name, + "id": event.hash, + "host": event.host, + "elasticsearch.cluster.id": event.es_cluster_id, + "plugin": { + "type": "output", + "output": { + "id": output.id, + "name": output.name, + "source":event.pipeline_source_map.map(tuple, (tuple.plugin_id == output.id), tuple.source).flatten().as(source, (source.size() != 0) ? source[0] : ""), + "elasticsearch.cluster.id": event.es_cluster_id_map.map(tuple, tuple.plugin_id == output.id, tuple.cluster_id), + "metrics": { + output.name: event.counter_map.flatten().filter(tuple, tuple.plugin_id == output.id).as(counter_map, zip( + counter_map.map(tuple, tuple.name), + counter_map.map(tuple, tuple.value) + )) + }, + "flow": has(output.flow) ? + output.flow + : + {}, + "events":{ + "in":output.events['in'], + "out":output.events.out, + }, + "time":{ + "duration":{ + "ms":output.events.duration_in_millis + } + } + } + } + }.drop_empty()) + }).collate(["filters", "outputs", "inputs", "codecs"])).as(plugins, { + "events": plugins.map(plugin, { + "logstash":{"pipeline":plugin} + }) + }) + redact: + fields: null + resource.url: http://localhost:9600/_node + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml index 61235d40965..10b05293429 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml @@ -9,7 +9,9 @@ inputs: dataset: mattermost.audit type: logs exclude_files: - - .gz$ + - \.gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - 
/var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - mattermost-audit diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml index 5c9eb7fddc2..0a5ab1ef6cd 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml @@ -23,6 +23,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -35,6 +37,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - mssql-logs @@ -99,6 +103,7 @@ inputs: dataset: microsoft_sqlserver.transaction_log type: metrics driver: mssql + fetch_from_all_databases: false hosts: - sqlserver://${kubernetes.hints.microsoft_sqlserver.transaction_log.username|kubernetes.hints.microsoft_sqlserver.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.transaction_log.password|kubernetes.hints.microsoft_sqlserver.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.transaction_log.host|kubernetes.hints.microsoft_sqlserver.host|'localhost'} metricsets: @@ -110,24 +115,24 @@ inputs: response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_mb, l.active_log_size_mb,l.log_backup_time,l.log_since_last_log_backup_mb,l.log_since_last_checkpoint_mb,l.log_recovery_size_mb from sys.dm_db_log_stats(DB_ID('master')) l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('master') ; response_format: table - - query: USE [master] ; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, 
l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('master') ; + - query: USE [master]; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('master') ; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', database_id FROM sys.databases WHERE name='model'; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_mb, l.active_log_size_mb,l.log_backup_time,l.log_since_last_log_backup_mb,l.log_since_last_checkpoint_mb,l.log_recovery_size_mb from sys.dm_db_log_stats(DB_ID('model')) l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('model') ; response_format: table - - query: USE [model] ; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('model') ; + - query: USE [model]; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, 
l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('model') ; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', database_id FROM sys.databases WHERE name='tempdb'; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_mb, l.active_log_size_mb,l.log_backup_time,l.log_since_last_log_backup_mb,l.log_since_last_checkpoint_mb,l.log_recovery_size_mb from sys.dm_db_log_stats(DB_ID('tempdb')) l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('tempdb') ; response_format: table - - query: USE [tempdb] ; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('tempdb') ; + - query: USE [tempdb]; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('tempdb') ; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', 
database_id FROM sys.databases WHERE name='msdb'; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_mb, l.active_log_size_mb,l.log_backup_time,l.log_since_last_log_backup_mb,l.log_since_last_checkpoint_mb,l.log_recovery_size_mb from sys.dm_db_log_stats(DB_ID('msdb')) l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('msdb') ; response_format: table - - query: USE [msdb] ; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('msdb') ; + - query: USE [msdb]; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('msdb') ; response_format: table data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml index c7a82f2812c..1e029fbba34 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml @@ -1,9 +1,1148 @@ inputs: + - name: cel-mimecast + id: cel-mimecast-${kubernetes.hints.container_id} + type: cel + use_output: default + streams: + - condition: 
${kubernetes.hints.mimecast.archive_search_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.archive_search_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? 
token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. 
+ }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? + string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: logs + end_field: end + look_back: 24h + page_size: 100 + path: /api/archive/get-archive-search-logs + start_field: start + time_field: createTime + tags: + - forwarded + - mimecast-archive-search-logs + - condition: ${kubernetes.hints.mimecast.audit_events.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.audit_events + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "state.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token:\"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n 
},\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? token :\n {\n \"data\": state.?last_page.data.orValue([{\n state.start_field: state.?cursor.last.orValue(now - duration(state.look_back)).format(time_layout.RFC3339),\n state.end_field: now.format(time_layout.RFC3339),\n }]),\n }.as(req,\n post_request(state.url.trim_right(\"/\") + state.path, \"application/json\", \n {\n \"meta\": {\n \"pagination\": {\n \"pageSize\": state.page_size,\n ?\"pageToken\": state.?last_page.next,\n }\n },\n \"data\": req.data,\n }.encode_json()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ?\n {\n \"events\": body.data.map(e, {\"message\": e.encode_json()}),\n \"cursor\": {\n \"last\": ([now] + body.data.map(e, e[state.time_field].parse_time([\"2006-01-02T15:04:05-0700\", time_layout.RFC3339]))).max(),\n },\n ?\"last_page\": has(body.?meta.pagination.next) && size(body.data) != 0 ?\n optional.of({\n ?\"next\": body.?meta.pagination.next,\n \"data\": req.data,\n })\n :\n optional.none(),\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": has(body.?meta.pagination.next) && size(body.data) != 0,\n }\n :\n // Mimecast can return failure states with a 200. This\n // is detected by a non-empty fail array at the root\n // of the response body. 
Don't attempt to parse this\n // out, just dump the whole body into the error message.\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \":\" + string(resp.Body), // We know this is not empty.\n },\n },\n \"want_more\": false,\n }\n )\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \":\" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + end_field: endDateTime + look_back: 24h + page_size: 100 + path: /api/audit/get-audit-events + start_field: startDateTime + time_field: eventTime + tags: + - forwarded + - mimecast-audit-events + - condition: ${kubernetes.hints.mimecast.cloud_integrated_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.cloud_integrated_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "// This program is shared between cloud_integrated_logs and siem_logs\n// If it is changed here changes should be reflected in the other data\n// streams. 
Do not differentiate the logic between these data streams\n// lightly; use the state variable for this unless absolutely required.\nstate.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token: \"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? 
token :\n state.?cursor.work_list.orValue(state.types.map(t, {\"type\": t})).as(work_list, size(work_list) == 0 ?\n state.types.map(t, {\"type\": t})\n :\n work_list\n ).as(work_list,\n get_request(\n state.url.trim_right(\"/\") + state.path + \"?\" + {\n \"type\": [work_list[0].type],\n ?\"nextPage\": work_list[0].?next.optMap(next, [next]),\n ?\"dateRangeStartsAt\": state.?start.optMap(start, [start.format(\"2006-01-02\")]),\n ?\"dateRangeEndsAt\": state.?end.optMap(end, [end.format(\"2006-01-02\")]),\n ?\"pageSize\": state.?page_size.optMap(size, [string(int(size))]),\n }.format_query()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body,\n {\n \"events\": body.value.map(b, has(b.url),\n get(b.url).as(batch, batch.StatusCode == 200 ?\n bytes(batch.Body).mime(\"application/gzip\").mime(\"application/x-ndjson\").map(e,\n {\n \"message\": dyn(e.encode_json()),\n }\n )\n :\n [{\n \"error\": {\n \"code\": string(batch.StatusCode),\n \"id\": string(batch.Status),\n \"message\": \"GET \" + b.url + \": \" + (\n size(batch.Body) != 0 ?\n string(batch.Body)\n :\n string(batch.Status) + ' (' + string(batch.StatusCode) + ')'\n ),\n },\n }]\n )\n ).flatten(),\n \"cursor\": {\n \"work_list\": (\n \"@nextPage\" in body && size(body.value) != 0 ?\n [work_list[0].with({\"next\": body[\"@nextPage\"]})]\n :\n []\n ) + tail(work_list),\n },\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": \"@nextPage\" in body && size(body.value) != 0,\n }.as(to_publish, to_publish.with({\n \"want_more\": to_publish.want_more || size(to_publish.cursor.work_list) != 0,\n }))\n ).as(state, \n // Check whether we still need to get more, but have\n // no event for this type. 
If we do, populate events\n // with a place-holder to be discarded by the ingest\n // pipeline.\n state.want_more && size(state.events) == 0 ?\n state.with({\"events\": [{\"message\": \"want_more\"}]})\n :\n state\n )\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"GET \" + state.path + \": \" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + end_field: dateRangeEndsAt + look_back: 24h + page_size: null + path: /siem/v1/batch/events/ci + start_field: dateRangeStartsAt + types: + - entities + - mailflow + - urlclick + tags: + - forwarded + - mimecast-cloud-integrated-logs + - condition: ${kubernetes.hints.mimecast.dlp_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.dlp_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. 
+ post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? 
+ { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? 
+ string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: dlpLogs + end_field: to + look_back: 24h + page_size: 100 + path: /api/dlp/get-logs + start_field: from + time_field: eventTime + tags: + - forwarded + - mimecast-dlp-logs + - condition: ${kubernetes.hints.mimecast.message_release_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.message_release_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. 
+ "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. 
Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? + string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: heldReleaseLogs + end_field: end + look_back: 24h + page_size: 100 + path: /api/gateway/get-held-release-logs + start_field: start + time_field: released + tags: + - forwarded + - mimecast-message-release-logs + - condition: ${kubernetes.hints.mimecast.siem_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.siem_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "// This program is shared between cloud_integrated_logs and siem_logs\n// If it is changed here changes should be reflected in the other data\n// streams. 
Do not differentiate the logic between these data streams\n// lightly; use the state variable for this unless absolutely required.\nstate.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token: \"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? 
token :\n state.?cursor.work_list.orValue(state.types.map(t, {\"type\": t})).as(work_list, size(work_list) == 0 ?\n state.types.map(t, {\"type\": t})\n :\n work_list\n ).as(work_list,\n get_request(\n state.url.trim_right(\"/\") + state.path + \"?\" + {\n \"type\": [work_list[0].type],\n ?\"nextPage\": work_list[0].?next.optMap(next, [next]),\n ?\"dateRangeStartsAt\": state.?start.optMap(start, [start.format(\"2006-01-02\")]),\n ?\"dateRangeEndsAt\": state.?end.optMap(end, [end.format(\"2006-01-02\")]),\n ?\"pageSize\": state.?page_size.optMap(size, [string(int(size))]),\n }.format_query()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body,\n {\n \"events\": body.value.map(b, has(b.url),\n get(b.url).as(batch, batch.StatusCode == 200 ?\n bytes(batch.Body).mime(\"application/gzip\").mime(\"application/x-ndjson\").map(e,\n {\n \"message\": dyn(e.encode_json()),\n }\n )\n :\n [{\n \"error\": {\n \"code\": string(batch.StatusCode),\n \"id\": string(batch.Status),\n \"message\": \"GET \" + b.url + \": \" + (\n size(batch.Body) != 0 ?\n string(batch.Body)\n :\n string(batch.Status) + ' (' + string(batch.StatusCode) + ')'\n ),\n },\n }]\n )\n ).flatten(),\n \"cursor\": {\n \"work_list\": (\n \"@nextPage\" in body && size(body.value) != 0 ?\n [work_list[0].with({\"next\": body[\"@nextPage\"]})]\n :\n []\n ) + tail(work_list),\n },\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": \"@nextPage\" in body && size(body.value) != 0,\n }.as(to_publish, to_publish.with({\n \"want_more\": to_publish.want_more || size(to_publish.cursor.work_list) != 0,\n }))\n ).as(state, \n // Check whether we still need to get more, but have\n // no event for this type. 
If we do, populate events\n // with a place-holder to be discarded by the ingest\n // pipeline.\n state.want_more && size(state.events) == 0 ?\n state.with({\"events\": [{\"message\": \"want_more\"}]})\n :\n state\n )\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"GET \" + state.path + \": \" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + end_field: dateRangeEndsAt + look_back: 24h + page_size: null + path: /siem/v1/batch/events/cg + start_field: dateRangeStartsAt + types: + - av + - delivery + - internal email protect + - impersonation protect + - journal + - process + - receipt + - attachment protect + - spam + - url protect + tags: + - forwarded + - mimecast-siem-logs + - condition: ${kubernetes.hints.mimecast.threat_intel_malware_customer.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.threat_intel_malware_customer + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "// This program is shared between threat_intel_malware_customer and\n// threat_intel_malware_grid. If it is changed here changes should be\n// reflected in the other data streams. 
Do not differentiate the logic\n// between these data streams lightly; use the state variable for this\n// unless absolutely required.\nstate.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token: \"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? token :\n {\n \"data\": state.?last_page.data.orValue([{\n ?\"start\": has(state.?cursor.token) ? optional.none() :\n optional.of(state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339))),\n ?\"end\": has(state.?cursor.token) ? 
optional.none() :\n optional.of(now.format(time_layout.RFC3339)),\n \"feedType\": state.feed_type,\n ?\"token\": state.?cursor.token,\n \"fileType\": \"stix\",\n }]),\n }.as(req,\n post_request(state.url.trim_right(\"/\") + state.path, \"application/json\", \n req.encode_json()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ?\n {\n \"events\": body.objects.map(e, e.type == \"indicator\", {\"message\": e.encode_json()}),\n \"cursor\": {\n // The last timestamp may step past the last timestamp\n // seen for an indicator. We assume here that if another\n // type has a later timestamp, then the time at the API\n // has progressed past the last indicator and we do not\n // need to reach back that far.\n \"last\": ([now] + body.objects.map(e, timestamp(e.modified))).max().format(time_layout.RFC3339),\n ?\"token\": resp.?Header[\"X-Mc-Threat-Feed-Next-Token\"][?0],\n },\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": resp.?Header[\"X-Mc-Threat-Feed-Next-Token\"].hasValue(),\n }\n :\n // Mimecast can return failure states with a 200. This\n // is detected by a non-empty fail array at the root\n // of the response body. Don't attempt to parse this\n // out, just dump the whole body into the error message.\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \": \" + string(resp.Body), // We know this is not empty.\n },\n },\n \"want_more\": false,\n }\n )\n : resp.StatusCode == 429 ?\n // For reasons, Mimecast does not set X-RateLimit-* headers\n // until the rate limit has been exceeded, so treat 429 codes\n // as a sentinel to back off. 
We don't want to log errors and\n // we do not want to update the cursor, so return an empty\n // events array.\n {\n \"events\": [],\n // Log the rate limit excession at DEBUG level.\n \"rate_limited\": debug(\"rate_limit_exceeded\", bytes(resp.Body).decode_json().?fail[0].message.orValue(\"missing message\")),\n \"want_more\": false,\n }\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \": \" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + feed_type: malware_customer + look_back: 24h + page_size: null + path: /api/ttp/threat-intel/get-feed + tags: + - forwarded + - mimecast-threat-intel-feed-malware-customer + - condition: ${kubernetes.hints.mimecast.threat_intel_malware_grid.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.threat_intel_malware_grid + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "// This program is shared between threat_intel_malware_customer and\n// threat_intel_malware_grid. If it is changed here changes should be\n// reflected in the other data streams. 
Do not differentiate the logic\n// between these data streams lightly; use the state variable for this\n// unless absolutely required.\nstate.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token: \"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? token :\n {\n \"data\": state.?last_page.data.orValue([{\n ?\"start\": has(state.?cursor.token) ? optional.none() :\n optional.of(state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339))),\n ?\"end\": has(state.?cursor.token) ? 
optional.none() :\n optional.of(now.format(time_layout.RFC3339)),\n \"feedType\": state.feed_type,\n ?\"token\": state.?cursor.token,\n \"fileType\": \"stix\",\n }]),\n }.as(req,\n post_request(state.url.trim_right(\"/\") + state.path, \"application/json\", \n req.encode_json()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ?\n {\n \"events\": body.objects.map(e, e.type == \"indicator\", {\"message\": e.encode_json()}),\n \"cursor\": {\n // The last timestamp may step past the last timestamp\n // seen for an indicator. We assume here that if another\n // type has a later timestamp, then the time at the API\n // has progressed past the last indicator and we do not\n // need to reach back that far.\n \"last\": ([now] + body.objects.map(e, timestamp(e.modified))).max().format(time_layout.RFC3339),\n ?\"token\": resp.?Header[\"X-Mc-Threat-Feed-Next-Token\"][?0],\n },\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": resp.?Header[\"X-Mc-Threat-Feed-Next-Token\"].hasValue(),\n }\n :\n // Mimecast can return failure states with a 200. This\n // is detected by a non-empty fail array at the root\n // of the response body. Don't attempt to parse this\n // out, just dump the whole body into the error message.\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \": \" + string(resp.Body), // We know this is not empty.\n },\n },\n \"want_more\": false,\n }\n )\n : resp.StatusCode == 429 ?\n // For reasons, Mimecast does not set X-RateLimit-* headers\n // until the rate limit has been exceeded, so treat 429 codes\n // as a sentinel to back off. 
We don't want to log errors and\n // we do not want to update the cursor, so return an empty\n // events array.\n {\n \"events\": [],\n // Log the rate limit excession at DEBUG level.\n \"rate_limited\": debug(\"rate_limit_exceeded\", bytes(resp.Body).decode_json().?fail[0].message.orValue(\"missing message\")),\n \"want_more\": false,\n }\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \": \" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + feed_type: malware_grid + look_back: 24h + page_size: null + path: /api/ttp/threat-intel/get-feed + tags: + - forwarded + - mimecast-threat-intel-feed-malware-grid + - condition: ${kubernetes.hints.mimecast.ttp_ap_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.ttp_ap_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. 
+ post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? 
+ { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? 
+ string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: attachmentLogs + end_field: to + look_back: 24h + page_size: 100 + path: /api/ttp/attachment/get-logs + start_field: from + time_field: date + tags: + - forwarded + - mimecast-ttp-ap + - condition: ${kubernetes.hints.mimecast.ttp_ip_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.ttp_ip_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. 
+ "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. 
Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? + string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: impersonationLogs + end_field: to + look_back: 24h + page_size: 100 + path: /api/ttp/impersonation/get-logs + start_field: from + time_field: eventTime + tags: + - forwarded + - mimecast-ttp-ip + - condition: ${kubernetes.hints.mimecast.ttp_url_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.ttp_url_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. 
+ post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? 
+ { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? 
+ string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: clickLogs + end_field: to + look_back: 24h + page_size: 100 + path: /api/ttp/url/get-logs + start_field: from + time_field: date + tags: + - forwarded + - mimecast-ttp-url + data_stream.namespace: default + - name: filestream-mimecast + id: filestream-mimecast-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mimecast.container_logs.enabled} == true + data_stream: + dataset: mimecast.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default - name: httpjson-mimecast id: httpjson-mimecast-${kubernetes.hints.container_id} type: httpjson use_output: default streams: + - condition: ${kubernetes.hints.mimecast.archive_search_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.createTime]]' + data_stream: + dataset: mimecast.archive_search_logs + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + target: body.meta.pagination.pageSize + value: 100 + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/archive/get-archive-search-logs:" (.header.Get "x-mc-date") (.header.Get 
"x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/archive/get-archive-search-logs + response.decode_as: application/json + response.pagination: + - delete: + target: body.data + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: |- + [[- if index .last_response.body.meta.pagination "next" -]] + [[- .last_response.body.meta.pagination.next -]] + [[- end -]] + response.split: + ignore_empty_value: true + split: + ignore_empty_value: true + keep_parent: false + target: body.logs + target: body.data + tags: + - forwarded + - mimecast-archive-search-logs - condition: ${kubernetes.hints.mimecast.audit_events.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true config_version: "2" cursor: @@ -135,6 +1274,7 @@ inputs: request.url: https://eu-api.mimecast.com/api/audit/get-siem-logs response.decode_as: application/zip response.split: + ignore_empty_value: true target: body.data transforms: - set: @@ -364,25 +1504,3 @@ inputs: - forwarded - mimecast-ttp-url data_stream.namespace: default - - name: filestream-mimecast - id: filestream-mimecast-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.mimecast.container_logs.enabled} == true - data_stream: - dataset: mimecast.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml index bfdfd059ebe..cc55ebbcf73 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml @@ -14,6 +14,8 @@ inputs: _conf: tz_offset: local 
fields_under_root: true + file_identity: + fingerprint: null parsers: - container: format: auto @@ -24,6 +26,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - modsec-audit diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml index 6af480629d2..b0bd0b07245 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - mongodb-logs @@ -38,7 +42,7 @@ inputs: password: ${kubernetes.hints.mongodb.collstats.password|kubernetes.hints.mongodb.password|''} period: ${kubernetes.hints.mongodb.collstats.period|kubernetes.hints.mongodb.period|'10s'} ssl.certificate: null - ssl.enabled: null + ssl.enabled: false ssl.key: null ssl.verification_mode: null username: ${kubernetes.hints.mongodb.collstats.username|kubernetes.hints.mongodb.username|''} @@ -68,7 +72,7 @@ inputs: password: ${kubernetes.hints.mongodb.metrics.password|kubernetes.hints.mongodb.password|''} period: ${kubernetes.hints.mongodb.metrics.period|kubernetes.hints.mongodb.period|'10s'} ssl.certificate: null - ssl.enabled: null + ssl.enabled: false ssl.key: null ssl.verification_mode: null username: ${kubernetes.hints.mongodb.metrics.username|kubernetes.hints.mongodb.username|''} diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml index df50544f5d9..b21edd74269 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml @@ -1,43 +1,4 @@ inputs: - - name: mysql/metrics-mysql - id: mysql/metrics-mysql-${kubernetes.hints.container_id} - type: mysql/metrics - use_output: default - streams: - - condition: ${kubernetes.hints.mysql.galera_status.enabled} == true and ${kubernetes.hints.mysql.enabled} == true - data_stream: - dataset: mysql.galera_status - type: metrics - hosts: - - ${kubernetes.hints.mysql.galera_status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} - metricsets: - - galera_status - password: ${kubernetes.hints.mysql.galera_status.password|kubernetes.hints.mysql.password|'test'} - period: ${kubernetes.hints.mysql.galera_status.period|kubernetes.hints.mysql.period|'10s'} - username: ${kubernetes.hints.mysql.galera_status.username|kubernetes.hints.mysql.username|'root'} - - condition: ${kubernetes.hints.mysql.performance.enabled} == true or ${kubernetes.hints.mysql.enabled} == true - data_stream: - dataset: mysql.performance - type: metrics - hosts: - - ${kubernetes.hints.mysql.performance.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} - metricsets: - - performance - password: ${kubernetes.hints.mysql.performance.password|kubernetes.hints.mysql.password|'test'} - period: ${kubernetes.hints.mysql.performance.period|kubernetes.hints.mysql.period|'10s'} - username: ${kubernetes.hints.mysql.performance.username|kubernetes.hints.mysql.username|'root'} - - condition: ${kubernetes.hints.mysql.status.enabled} == true or ${kubernetes.hints.mysql.enabled} == true - data_stream: - dataset: mysql.status - type: metrics - hosts: - - ${kubernetes.hints.mysql.status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} - metricsets: - - status - password: ${kubernetes.hints.mysql.status.password|kubernetes.hints.mysql.password|'test'} - period: ${kubernetes.hints.mysql.status.period|kubernetes.hints.mysql.period|'10s'} - username: 
${kubernetes.hints.mysql.status.username|kubernetes.hints.mysql.username|'root'} - data_stream.namespace: default - name: filestream-mysql id: filestream-mysql-${kubernetes.hints.container_id} type: filestream @@ -49,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -63,6 +26,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.mysql.slowlog.enabled} == true or ${kubernetes.hints.mysql.enabled} == true data_stream: @@ -73,6 +38,8 @@ inputs: exclude_lines: - '^[\/\w\.]+, Version: .* started with:.*' - ^# Time:.* + file_identity: + fingerprint: null multiline: match: after negate: true @@ -85,5 +52,46 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true data_stream.namespace: default + - name: mysql/metrics-mysql + id: mysql/metrics-mysql-${kubernetes.hints.container_id} + type: mysql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.mysql.galera_status.enabled} == true and ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.galera_status + type: metrics + hosts: + - ${kubernetes.hints.mysql.galera_status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - galera_status + password: ${kubernetes.hints.mysql.galera_status.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.galera_status.period|kubernetes.hints.mysql.period|'10s'} + username: ${kubernetes.hints.mysql.galera_status.username|kubernetes.hints.mysql.username|'root'} + - condition: ${kubernetes.hints.mysql.performance.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.performance + type: metrics + hosts: + - ${kubernetes.hints.mysql.performance.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - 
performance + password: ${kubernetes.hints.mysql.performance.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.performance.period|kubernetes.hints.mysql.period|'10s'} + username: ${kubernetes.hints.mysql.performance.username|kubernetes.hints.mysql.username|'root'} + - condition: ${kubernetes.hints.mysql.status.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.status + type: metrics + hosts: + - ${kubernetes.hints.mysql.status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - status + password: ${kubernetes.hints.mysql.status.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.status.period|kubernetes.hints.mysql.period|'10s'} + username: ${kubernetes.hints.mysql.status.username|kubernetes.hints.mysql.username|'root'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml index 4630a5b5e9e..c75da289568 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml @@ -1,4 +1,31 @@ inputs: + - name: filestream-nats + id: filestream-nats-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nats.log.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.log + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nats.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - nats-log + data_stream.namespace: default - name: nats/metrics-nats id: nats/metrics-nats-${kubernetes.hints.container_id} type: nats/metrics @@ -59,26 +86,3 @@ 
inputs: - subscriptions period: ${kubernetes.hints.nats.subscriptions.period|kubernetes.hints.nats.period|'10s'} data_stream.namespace: default - - name: filestream-nats - id: filestream-nats-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.nats.log.enabled} == true or ${kubernetes.hints.nats.enabled} == true - data_stream: - dataset: nats.log - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.nats.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - nats-log - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml index 4e5879469a4..930171a10de 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml @@ -1,4 +1,62 @@ inputs: + - name: filestream-nginx + id: filestream-nginx-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.access.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.access + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + ignore_older: 72h + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - nginx-access + - condition: ${kubernetes.hints.nginx.error.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.error + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + ignore_older: 72h + multiline: 
+ match: after + negate: true + pattern: '^\d{4}\/\d{2}\/\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - nginx-error + data_stream.namespace: default - name: httpjson-nginx id: httpjson-nginx-${kubernetes.hints.container_id} type: httpjson @@ -97,53 +155,3 @@ inputs: period: ${kubernetes.hints.nginx.stubstatus.period|kubernetes.hints.nginx.period|'10s'} server_status_path: /nginx_status data_stream.namespace: default - - name: filestream-nginx - id: filestream-nginx-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.nginx.access.enabled} == true or ${kubernetes.hints.nginx.enabled} == true - data_stream: - dataset: nginx.access - type: logs - exclude_files: - - .gz$ - ignore_older: 72h - parsers: - - container: - format: auto - stream: ${kubernetes.hints.nginx.access.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - nginx-access - - condition: ${kubernetes.hints.nginx.error.enabled} == true or ${kubernetes.hints.nginx.enabled} == true - data_stream: - dataset: nginx.error - type: logs - exclude_files: - - .gz$ - ignore_older: 72h - multiline: - match: after - negate: true - pattern: '^\d{4}\/\d{2}\/\d{2} ' - parsers: - - container: - format: auto - stream: ${kubernetes.hints.nginx.error.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - nginx-error - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml index 29e0c8f1699..e5dac21fdf8 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml @@ -1,4 +1,31 @@ inputs: + - name: filestream-oracle + id: filestream-oracle-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.oracle.database_audit.enabled} == true or ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.database_audit + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^Audit file + parsers: + - multiline: + match: after + negate: true + pattern: ^[A-Za-z]{3}\s+[A-Za-z]{3}\s+[0-9]{1,2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s[0-9]{4}\s\S[0-9]{2}:[0-9]{2} + timeout: 10 + type: pattern + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + tags: + - oracle-database_audit + data_stream.namespace: default - name: sql/metrics-oracle id: sql/metrics-oracle-${kubernetes.hints.container_id} type: sql/metrics @@ -92,30 +119,3 @@ inputs: - query: WITH data_files AS (SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status FROM sys.dba_data_files UNION SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, status AS ONLINE_STATUS FROM sys.dba_temp_files), spaces AS (SELECT b.tablespace_name TB_NAME, tbs_size TB_SIZE_USED, a.free_space TB_SIZE_FREE FROM (SELECT tablespace_name, SUM(bytes) AS free_space FROM dba_free_space GROUP BY tablespace_name) a, (SELECT tablespace_name, SUM(bytes) AS tbs_size FROM dba_data_files GROUP BY tablespace_name) b WHERE a.tablespace_name(+) = b.tablespace_name AND a.tablespace_name != 'TEMP'), temp_spaces AS (SELECT tablespace_name, tablespace_size, allocated_space, free_space FROM dba_temp_free_space WHERE tablespace_name = 'TEMP'), details AS (SELECT df.file_name, df.file_id, 
df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, sp.tb_size_used, sp.tb_size_free FROM data_files df, spaces sp WHERE df.tablespace_name = sp.tb_name UNION SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, tsp.tablespace_size - tsp.free_space AS TB_SIZE_USED, tsp.free_space AS TB_SIZE_FREE FROM data_files df, temp_spaces tsp WHERE df.tablespace_name = tsp.tablespace_name) SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status, tb_size_used, tb_size_free, SUM(bytes) over() AS TOTAL_BYTES FROM details response_format: table data_stream.namespace: default - - name: filestream-oracle - id: filestream-oracle-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.oracle.database_audit.enabled} == true or ${kubernetes.hints.oracle.enabled} == true - data_stream: - dataset: oracle.database_audit - type: logs - exclude_files: - - .gz$ - exclude_lines: - - ^Audit file - parsers: - - multiline: - match: after - negate: true - pattern: ^[A-Za-z]{3}\s+[A-Za-z]{3}\s+[0-9]{1,2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s[0-9]{4}\s\S[0-9]{2}:[0-9]{2} - timeout: 10 - type: pattern - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - tags: - - oracle-database_audit - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml index 7aeb20e1ccc..76bf6396568 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml @@ -13,6 +13,10 @@ inputs: max_message_size: 50KiB processors: - add_locale: null + - copy_fields: + fields: + - from: '@timestamp' + to: event.created - syslog: field: message format: auto @@ -21,6 +25,7 @@ inputs: fields: 
internal_zones: - trust + tz_offset: Local target: _conf - add_fields: fields: @@ -44,6 +49,10 @@ inputs: max_message_size: 50KiB processors: - add_locale: null + - copy_fields: + fields: + - from: '@timestamp' + to: event.created - syslog: field: message format: auto @@ -52,6 +61,7 @@ inputs: fields: internal_zones: - trust + tz_offset: Local target: _conf - add_fields: fields: @@ -72,7 +82,7 @@ inputs: dataset: panw.panos type: logs exclude_files: - - .gz$ + - \.gz$ fields: _conf: external_zones: @@ -81,6 +91,8 @@ inputs: - trust tz_offset: local fields_under_root: true + file_identity: + fingerprint: null parsers: - container: format: auto @@ -89,8 +101,14 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log processors: - add_locale: null + - copy_fields: + fields: + - from: '@timestamp' + to: event.created prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - panw-panos diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml index 18c9cf3ed4b..8099386204d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -22,6 +24,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - postgresql-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml index e5b613a4804..43c1ade916a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml @@ -59,9 +59,9 @@ inputs: - remote_write port: 9201 rate_counters: true - 
ssl.certificate: /etc/pki/server/cert.pem + ssl.certificate: null ssl.enabled: null - ssl.key: null + ssl.key: /etc/pki/server/cert.key types_patterns.exclude: null types_patterns.include: null use_types: true diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml index 698bb87070c..075f51fdb1f 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -24,6 +26,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml index eefb7e7a008..a1321a6880c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml @@ -1,29 +1,4 @@ inputs: - - name: filestream-redis - id: filestream-redis-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.redis.log.enabled} == true or ${kubernetes.hints.redis.enabled} == true - data_stream: - dataset: redis.log - type: logs - exclude_files: - - .gz$ - exclude_lines: - - ^\s+[\-`('.|_] - parsers: - - container: - format: auto - stream: ${kubernetes.hints.redis.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - redis-log - data_stream.namespace: default - name: redis-redis id: redis-redis-${kubernetes.hints.container_id} type: redis @@ -85,3 +60,32 @@ inputs: password: ${kubernetes.hints.redis.keyspace.password|kubernetes.hints.redis.password|''} period: 
${kubernetes.hints.redis.keyspace.period|kubernetes.hints.redis.period|'10s'} data_stream.namespace: default + - name: filestream-redis + id: filestream-redis-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.redis.log.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.log + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^\s+[\-`('.|_] + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.redis.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - redis-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml index 3797fadc554..ca4282d7036 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - santa-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml index 8557717a5db..7e7c6e3de88 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml @@ -1,4 +1,26 @@ inputs: + - name: filestream-sentinel_one + id: filestream-sentinel_one-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: 
${kubernetes.hints.sentinel_one.container_logs.enabled} == true + data_stream: + dataset: sentinel_one.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default - name: httpjson-sentinel_one id: httpjson-sentinel_one-${kubernetes.hints.container_id} type: httpjson @@ -195,25 +217,3 @@ inputs: - forwarded - sentinel_one-threat data_stream.namespace: default - - name: filestream-sentinel_one - id: filestream-sentinel_one-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true - data_stream: - dataset: sentinel_one.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml index 36254df2c53..f08e0fb0c69 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -26,6 +28,8 @@ inputs: target: _tmp prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml index 9d94ff431d4..39de591c936 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml index cbb037a11d1..a25c2eb659c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml index 17f23d019e6..83e8671e3b3 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml @@ -15,6 +15,8 @@ inputs: remove_mapped_fields: false tz_offset: UTC fields_under_root: true + file_identity: + fingerprint: null parsers: - container: format: auto @@ -23,6 +25,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - symantec-endpoint-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml index 96a643f41ea..5127a4ba11d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml 
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml @@ -1,4 +1,32 @@ inputs: + - name: synthetics/http-synthetics + id: synthetics/http-synthetics-${kubernetes.hints.container_id} + type: synthetics/http + use_output: default + streams: + - __ui: null + check.request.method: null + condition: ${kubernetes.hints.synthetics.http.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: http + type: synthetics + enabled: true + ipv4: true + ipv6: true + max_attempts: 2 + max_redirects: null + name: null + password: ${kubernetes.hints.synthetics.http.password|kubernetes.hints.synthetics.password|''} + response.include_body: null + response.include_headers: null + run_from.geo.name: Fleet managed + run_from.id: fleet_managed + schedule: '@every 3m' + timeout: ${kubernetes.hints.synthetics.http.timeout|kubernetes.hints.synthetics.timeout|''} + type: http + urls: null + username: ${kubernetes.hints.synthetics.http.username|kubernetes.hints.synthetics.username|''} + data_stream.namespace: default - name: synthetics/tcp-synthetics id: synthetics/tcp-synthetics-${kubernetes.hints.container_id} type: synthetics/tcp @@ -13,12 +41,8 @@ inputs: hosts: ${kubernetes.hints.synthetics.tcp.host|kubernetes.hints.synthetics.host|''} ipv4: true ipv6: true + max_attempts: 2 name: null - processors: - - add_fields: - fields: - monitor.fleet_managed: true - target: "" proxy_use_local_resolver: false run_from.geo.name: Fleet managed run_from.id: fleet_managed @@ -40,12 +64,8 @@ inputs: hosts: ${kubernetes.hints.synthetics.icmp.host|kubernetes.hints.synthetics.host|''} ipv4: true ipv6: true + max_attempts: 2 name: null - processors: - - add_fields: - fields: - monitor.fleet_managed: true - target: "" run_from.geo.name: Fleet managed run_from.id: fleet_managed schedule: '@every 3m' @@ -64,12 +84,8 @@ inputs: dataset: browser type: synthetics enabled: true + max_attempts: 2 name: null - processors: - - add_fields: - fields: - 
monitor.fleet_managed: true - target: "" run_from.geo.name: Fleet managed run_from.id: fleet_managed schedule: '@every 3m' @@ -117,35 +133,3 @@ inputs: symlinks: true tags: [] data_stream.namespace: default - - name: synthetics/http-synthetics - id: synthetics/http-synthetics-${kubernetes.hints.container_id} - type: synthetics/http - use_output: default - streams: - - __ui: null - check.request.method: null - condition: ${kubernetes.hints.synthetics.http.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true - data_stream: - dataset: http - type: synthetics - enabled: true - ipv4: true - ipv6: true - max_redirects: null - name: null - password: ${kubernetes.hints.synthetics.http.password|kubernetes.hints.synthetics.password|''} - processors: - - add_fields: - fields: - monitor.fleet_managed: true - target: "" - response.include_body: null - response.include_headers: null - run_from.geo.name: Fleet managed - run_from.id: fleet_managed - schedule: '@every 3m' - timeout: ${kubernetes.hints.synthetics.http.timeout|kubernetes.hints.synthetics.timeout|''} - type: http - urls: null - username: ${kubernetes.hints.synthetics.http.username|kubernetes.hints.synthetics.username|''} - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml index e88d1490bc4..4b9422213e0 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml @@ -1,27 +1,20 @@ inputs: - - name: filestream-tomcat - id: filestream-tomcat-${kubernetes.hints.container_id} - type: filestream + - name: udp-tomcat + id: udp-tomcat-${kubernetes.hints.container_id} + type: udp use_output: default streams: - - condition: ${kubernetes.hints.tomcat.log.enabled} == true and ${kubernetes.hints.tomcat.enabled} == true + - condition: ${kubernetes.hints.tomcat.log.enabled} == true or 
${kubernetes.hints.tomcat.enabled} == true data_stream: dataset: tomcat.log type: logs - exclude_files: - - .gz$ fields: observer: product: TomCat type: Web vendor: Apache fields_under_root: true - parsers: - - container: - format: auto - stream: ${kubernetes.hints.tomcat.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log + host: localhost:9523 processors: - script: lang: javascript @@ -2764,16 +2757,14 @@ inputs: target_field: url.registered_domain target_subdomain_field: url.subdomain - add_locale: null - prospector: - scanner: - symlinks: true tags: - tomcat-log - forwarded + udp: null data_stream.namespace: default - - name: udp-tomcat - id: udp-tomcat-${kubernetes.hints.container_id} - type: udp + - name: tcp-tomcat + id: tcp-tomcat-${kubernetes.hints.container_id} + type: tcp use_output: default streams: - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true @@ -5532,24 +5523,33 @@ inputs: tags: - tomcat-log - forwarded - udp: null + tcp: null data_stream.namespace: default - - name: tcp-tomcat - id: tcp-tomcat-${kubernetes.hints.container_id} - type: tcp + - name: filestream-tomcat + id: filestream-tomcat-${kubernetes.hints.container_id} + type: filestream use_output: default streams: - - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true + - condition: ${kubernetes.hints.tomcat.log.enabled} == true and ${kubernetes.hints.tomcat.enabled} == true data_stream: dataset: tomcat.log type: logs + exclude_files: + - .gz$ fields: observer: product: TomCat type: Web vendor: Apache fields_under_root: true - host: localhost:9523 + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.tomcat.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log processors: - script: lang: javascript @@ -8292,8 +8292,12 @@ inputs: target_field: 
url.registered_domain target_subdomain_field: url.subdomain - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true tags: - tomcat-log - forwarded - tcp: null data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml index dbd3b642d42..b485c4cbed5 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -31,8 +35,7 @@ inputs: data_stream: dataset: traefik.health type: metrics - hosts: - - ${kubernetes.hints.traefik.health.host|kubernetes.hints.traefik.host|'localhost:8080'} + hosts: null metricsets: - health period: ${kubernetes.hints.traefik.health.period|kubernetes.hints.traefik.period|'10s'} diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml index bc21b484f27..1204c4e7e9c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml @@ -1,17 +1,4 @@ inputs: - - name: udp-udp - id: udp-udp-${kubernetes.hints.container_id} - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true - data_stream: - dataset: udp.generic - type: logs - host: localhost:8080 - max_message_size: 10KiB - timeout: ${kubernetes.hints.udp.generic.timeout|kubernetes.hints.udp.timeout|''} - data_stream.namespace: default - name: filestream-udp id: 
filestream-udp-${kubernetes.hints.container_id} type: filestream @@ -34,3 +21,16 @@ inputs: symlinks: true tags: [] data_stream.namespace: default + - name: udp-udp + id: udp-udp-${kubernetes.hints.container_id} + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true + data_stream: + dataset: udp.generic + type: logs + host: localhost:8080 + max_message_size: 10KiB + timeout: ${kubernetes.hints.udp.generic.timeout|kubernetes.hints.udp.timeout|''} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml index 871e2ade541..eac5df1c517 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -20,6 +22,8 @@ inputs: - /usr/local/var/spool/zeek/capture_loss.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -30,6 +34,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -40,6 +46,8 @@ inputs: - /usr/local/var/spool/zeek/conn.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -50,6 +58,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -60,6 +70,8 @@ inputs: - /usr/local/var/spool/zeek/dce_rpc.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -70,6 +82,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -80,6 +94,8 @@ inputs: - /usr/local/var/spool/zeek/dhcp.log prospector: scanner: + fingerprint: + enabled: true 
symlinks: true tags: - forwarded @@ -90,6 +106,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -100,6 +118,8 @@ inputs: - /usr/local/var/spool/zeek/dnp3.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - zeek-dnp3 @@ -109,6 +129,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -119,6 +141,8 @@ inputs: - /usr/local/var/spool/zeek/dns.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -129,6 +153,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -139,6 +165,8 @@ inputs: - /usr/local/var/spool/zeek/dpd.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - zeek-dpd @@ -148,6 +176,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -158,6 +188,8 @@ inputs: - /usr/local/var/spool/zeek/files.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - zeek-files @@ -167,6 +199,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -177,6 +211,8 @@ inputs: - /usr/local/var/spool/zeek/ftp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -187,6 +223,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -197,6 +235,8 @@ inputs: - /usr/local/var/spool/zeek/http.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -207,6 +247,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -217,6 +259,8 @@ inputs: - /usr/local/var/spool/zeek/intel.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: 
- forwarded @@ -227,6 +271,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -237,6 +283,8 @@ inputs: - /usr/local/var/spool/zeek/irc.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -247,6 +295,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -257,6 +307,8 @@ inputs: - /usr/local/var/spool/zeek/kerberos.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -267,6 +319,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -277,6 +331,8 @@ inputs: - /usr/local/var/spool/zeek/known_certs.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -287,6 +343,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -297,6 +355,8 @@ inputs: - /usr/local/var/spool/zeek/known_hosts.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -307,6 +367,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -317,6 +379,8 @@ inputs: - /usr/local/var/spool/zeek/known_services.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -327,6 +391,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -337,6 +403,8 @@ inputs: - /usr/local/var/spool/zeek/modbus.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -347,6 +415,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -357,6 +427,8 @@ inputs: - /usr/local/var/spool/zeek/mysql.log prospector: scanner: + fingerprint: + enabled: true symlinks: 
true tags: - forwarded @@ -367,6 +439,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -377,6 +451,8 @@ inputs: - /usr/local/var/spool/zeek/notice.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -387,6 +463,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -397,6 +475,8 @@ inputs: - /usr/local/var/spool/zeek/ntlm.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -407,6 +487,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -417,6 +499,8 @@ inputs: - /usr/local/var/spool/zeek/ntp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -427,6 +511,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -437,6 +523,8 @@ inputs: - /usr/local/var/spool/zeek/ocsp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -447,6 +535,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -457,6 +547,8 @@ inputs: - /usr/local/var/spool/zeek/pe.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -467,6 +559,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -477,6 +571,8 @@ inputs: - /usr/local/var/spool/zeek/radius.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -487,6 +583,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -497,6 +595,8 @@ inputs: - /usr/local/var/spool/zeek/rdp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - 
forwarded @@ -507,6 +607,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -517,6 +619,8 @@ inputs: - /usr/local/var/spool/zeek/rfb.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -527,6 +631,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -537,6 +643,8 @@ inputs: - /usr/local/var/spool/zeek/signature.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -547,6 +655,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -557,6 +667,8 @@ inputs: - /usr/local/var/spool/zeek/sip.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -567,6 +679,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -577,6 +691,8 @@ inputs: - /usr/local/var/spool/zeek/smb_cmd.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -587,6 +703,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -597,6 +715,8 @@ inputs: - /usr/local/var/spool/zeek/smb_files.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -607,6 +727,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -617,6 +739,8 @@ inputs: - /usr/local/var/spool/zeek/smb_mapping.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -627,6 +751,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -637,6 +763,8 @@ inputs: - /usr/local/var/spool/zeek/smtp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - 
forwarded @@ -647,6 +775,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -657,6 +787,8 @@ inputs: - /usr/local/var/spool/zeek/snmp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -667,6 +799,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -677,6 +811,8 @@ inputs: - /usr/local/var/spool/zeek/socks.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -687,6 +823,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -697,6 +835,8 @@ inputs: - /usr/local/var/spool/zeek/software.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -707,6 +847,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -717,6 +859,8 @@ inputs: - /usr/local/var/spool/zeek/ssh.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -727,6 +871,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -737,6 +883,8 @@ inputs: - /usr/local/var/spool/zeek/ssl.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -747,6 +895,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -757,6 +907,8 @@ inputs: - /usr/local/var/spool/zeek/stats.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -767,6 +919,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -777,6 +931,8 @@ inputs: - /usr/local/var/spool/zeek/syslog.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ 
-787,6 +943,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -797,6 +955,8 @@ inputs: - /usr/local/var/spool/zeek/traceroute.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -807,6 +967,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -817,6 +979,8 @@ inputs: - /usr/local/var/spool/zeek/tunnel.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -827,6 +991,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -837,6 +1003,8 @@ inputs: - /usr/local/var/spool/zeek/weird.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -847,6 +1015,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -857,6 +1027,8 @@ inputs: - /usr/local/var/spool/zeek/x509.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/dev-tools/packaging/package_test.go b/dev-tools/packaging/package_test.go index ebf4aac261b..41104997df1 100644 --- a/dev-tools/packaging/package_test.go +++ b/dev-tools/packaging/package_test.go @@ -49,14 +49,15 @@ const ( ) var ( - excludedPathsPattern = regexp.MustCompile(`node_modules`) - configFilePattern = regexp.MustCompile(`.*beat\.spec.yml$|.*beat\.yml$|apm-server\.yml|elastic-agent\.yml$$`) - manifestFilePattern = regexp.MustCompile(`manifest.yml`) - modulesDirPattern = regexp.MustCompile(`module/.+`) - modulesDDirPattern = regexp.MustCompile(`modules.d/$`) - modulesDFilePattern = regexp.MustCompile(`modules.d/.+`) - monitorsDFilePattern = regexp.MustCompile(`monitors.d/.+`) - systemdUnitFilePattern = regexp.MustCompile(`/lib/systemd/system/.*\.service`) + excludedPathsPattern = regexp.MustCompile(`node_modules`) + 
configFilePattern = regexp.MustCompile(`.*beat\.spec.yml$|.*beat\.yml$|apm-server\.yml|elastic-agent\.yml$$`) + manifestFilePattern = regexp.MustCompile(`manifest.yml`) + modulesDirPattern = regexp.MustCompile(`module/.+`) + modulesDDirPattern = regexp.MustCompile(`modules.d/$`) + modulesDFilePattern = regexp.MustCompile(`modules.d/.+`) + monitorsDFilePattern = regexp.MustCompile(`monitors.d/.+`) + systemdUnitFilePattern = regexp.MustCompile(`/lib/systemd/system/.*\.service`) + hintsInputsDFilePattern = regexp.MustCompile(`usr/share/elastic-agent/hints.inputs.d/.*\.yml`) licenseFiles = []string{"LICENSE.txt", "NOTICE.txt"} ) @@ -297,6 +298,7 @@ func checkDocker(t *testing.T, file string) { checkManifestPermissionsWithMode(t, p, os.FileMode(0644)) checkModulesPresent(t, "", p) checkModulesDPresent(t, "", p) + checkHintsInputsD(t, "hints.inputs.d", hintsInputsDFilePattern, p) checkLicensesPresent(t, "licenses/", p) } @@ -447,6 +449,21 @@ func checkMonitorsDPresent(t *testing.T, prefix string, p *packageFile) { } } +func checkHintsInputsD(t *testing.T, name string, r *regexp.Regexp, p *packageFile) { + t.Run(fmt.Sprintf("%s %s contents", p.Name, name), func(t *testing.T) { + total := 0 + for _, entry := range p.Contents { + if r.MatchString(entry.File) { + total++ + } + } + + if total == 0 { + t.Errorf("no hints inputs found under %s", name) + } + }) +} + func checkModules(t *testing.T, name, prefix string, r *regexp.Regexp, p *packageFile) { t.Run(fmt.Sprintf("%s %s contents", p.Name, name), func(t *testing.T) { minExpectedModules := *minModules @@ -711,7 +728,7 @@ func readTarContents(tarName string, data io.Reader) (*packageFile, error) { File: header.Name, UID: header.Uid, GID: header.Gid, - Mode: os.FileMode(header.Mode), + Mode: os.FileMode(header.Mode), //nolint:gosec // Reason: header.Mode should never overflow from int64 -> uint32 } } diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index ec7008748a9..f2bdddb36be 100644 --- 
a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -274,6 +274,9 @@ shared: content: > {{ commit }} mode: 0644 + 'hints.inputs.d': + source: '{{ repo.RootDir }}/deploy/kubernetes/elastic-agent-standalone/templates.d' + mode: 0755 # cloud build to beats-ci repository - &agent_docker_cloud_spec diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 4e42ccca917..397cf6a2eeb 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -23,6 +23,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" + "github.com/elastic/elastic-agent/internal/pkg/composable/providers/kubernetes" "github.com/elastic/elastic-agent/internal/pkg/config" otelmanager "github.com/elastic/elastic-agent/internal/pkg/otel/manager" "github.com/elastic/elastic-agent/internal/pkg/release" @@ -135,7 +136,12 @@ func New( log.Info("Parsed configuration and determined agent is managed locally") loader := config.NewLoader(log, paths.ExternalInputs()) - discover := config.Discoverer(pathConfigFile, cfg.Settings.Path, paths.ExternalInputs()) + rawCfgMap, err := rawConfig.ToMapStr() + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to transform agent configuration into a map: %w", err) + } + discover := config.Discoverer(pathConfigFile, cfg.Settings.Path, paths.ExternalInputs(), + kubernetes.GetHintsInputConfigPath(log, rawCfgMap)) if !cfg.Settings.Reload.Enabled { log.Debug("Reloading of configuration is off") configMgr = newOnce(log, discover, loader) diff --git a/internal/pkg/agent/cmd/enroll.go b/internal/pkg/agent/cmd/enroll.go index 924ab373f8f..b1f82e6989e 100644 --- a/internal/pkg/agent/cmd/enroll.go +++ b/internal/pkg/agent/cmd/enroll.go @@ -351,6 +351,24 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command) 
error { fromInstall, _ := cmd.Flags().GetBool(fromInstallArg) + hasRoot, err := utils.HasRoot() + if err != nil { + return fmt.Errorf("checking if running with root/Administrator privileges: %w", err) + } + if hasRoot && !fromInstall { + binPath, err := os.Executable() + if err != nil { + return fmt.Errorf("error while getting executable path: %w", err) + } + isOwner, err := isOwnerExec(binPath) + if err != nil { + return fmt.Errorf("ran into an error while figuring out if user is allowed to execute the enroll command: %w", err) + } + if !isOwner { + return UserOwnerMismatchError + } + } + pathConfigFile := paths.ConfigFile() rawConfig, err := config.LoadFile(pathConfigFile) if err != nil { @@ -525,7 +543,6 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command) error { pathConfigFile, store, ) - if err != nil { return err } diff --git a/internal/pkg/agent/cmd/enroll_match_fileowner_unix.go b/internal/pkg/agent/cmd/enroll_match_fileowner_unix.go new file mode 100644 index 00000000000..6e0df49a060 --- /dev/null +++ b/internal/pkg/agent/cmd/enroll_match_fileowner_unix.go @@ -0,0 +1,59 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build !windows + +package cmd + +import ( + "fmt" + "os" + "strconv" + "syscall" + + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" +) + +var UserOwnerMismatchError = errors.New("the command is executed as root but the program files are not owned by the root user. 
execute the command as the user that owns the program files") + +func getFileOwner(filePath string) (string, error) { + fileInfo, err := os.Stat(filePath) + if err != nil { + return "", fmt.Errorf("failed to get file info: %w", err) + } + + stat, ok := fileInfo.Sys().(*syscall.Stat_t) + if !ok { + return "", fmt.Errorf("failed to get system specific file info for %s", filePath) + } + return strconv.Itoa(int(stat.Uid)), nil +} + +func getCurrentUser() (string, error) { + return strconv.Itoa(os.Geteuid()), nil +} + +func isFileOwner(curUser string, fileOwner string) (bool, error) { + return curUser == fileOwner, nil +} + +// Checks if the provided file is owned by the user that initiated the process +func isOwnerExec(filePath string) (bool, error) { + owner, err := getFileOwner(filePath) + if err != nil { + return false, fmt.Errorf("failed to get file owner: %w", err) + } + + curUser, err := getCurrentUser() + if err != nil { + return false, fmt.Errorf("failed to get current user: %w", err) + } + + isOwner, err := isFileOwner(curUser, owner) + if err != nil { + return false, fmt.Errorf("error while checking if current user is the file owner: %w", err) + } + + return isOwner, nil +} diff --git a/internal/pkg/agent/cmd/enroll_match_fileowner_unix_test.go b/internal/pkg/agent/cmd/enroll_match_fileowner_unix_test.go new file mode 100644 index 00000000000..9ad3b22db8c --- /dev/null +++ b/internal/pkg/agent/cmd/enroll_match_fileowner_unix_test.go @@ -0,0 +1,28 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0.
+ +//go:build !windows + +package cmd + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsOwnerExecUnix(t *testing.T) { + path := t.TempDir() + fp := filepath.Join(path, "testfile") + fi, err := os.Create(fp) + require.NoError(t, err) + defer fi.Close() + + isOwner, err := isOwnerExec(fp) + require.NoError(t, err) + + require.True(t, isOwner) +} diff --git a/internal/pkg/agent/cmd/enroll_match_fileowner_windows.go b/internal/pkg/agent/cmd/enroll_match_fileowner_windows.go new file mode 100644 index 00000000000..75ccacd0997 --- /dev/null +++ b/internal/pkg/agent/cmd/enroll_match_fileowner_windows.go @@ -0,0 +1,92 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build windows + +package cmd + +import ( + "fmt" + + "golang.org/x/sys/windows" + + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" +) + +var UserOwnerMismatchError = errors.New("the command is executed as root but the program files are not owned by the root user.") + +func getFileOwner(filePath string) (string, error) { + // Get security information of the file + sd, err := windows.GetNamedSecurityInfo( + filePath, + windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION, + ) + if err != nil { + return "", fmt.Errorf("failed to get security info: %w", err) + } + owner, _, err := sd.Owner() + if err != nil { + return "", fmt.Errorf("failed to get security descriptor owner: %w", err) + } + + return owner.String(), nil +} + +// Helper to get the current user's SID +func getCurrentUser() (string, error) { + // Get the token for the current process + var token windows.Token + err := windows.OpenProcessToken(windows.CurrentProcess(), windows.TOKEN_QUERY, &token) + if err != nil { + return "", fmt.Errorf("failed to open process token: 
%w", err) + } + defer token.Close() + + // Get the token use + tokenUser, err := token.GetTokenUser() + if err != nil { + return "", fmt.Errorf("failed to get token user: %w", err) + } + + return tokenUser.User.Sid.String(), nil +} + +func isFileOwner(curUser string, fileOwner string) (bool, error) { + var cSid *windows.SID + err := windows.ConvertStringSidToSid(windows.StringToUTF16Ptr(curUser), &cSid) + if err != nil { + return false, fmt.Errorf("failed to convert user SID string to SID: %w", err) + } + + var fSid *windows.SID + err = windows.ConvertStringSidToSid(windows.StringToUTF16Ptr(fileOwner), &fSid) + if err != nil { + return false, fmt.Errorf("failed to convert file SID string to SID: %w", err) + } + + isEqual := fSid.Equals(cSid) + + return isEqual, nil +} + +// Checks if the provided file is owned by the user that initiated the process +func isOwnerExec(filePath string) (bool, error) { + fileOwner, err := getFileOwner(filePath) + if err != nil { + return false, fmt.Errorf("getting file owner: %w", err) + } + + user, err := getCurrentUser() + if err != nil { + return false, fmt.Errorf("ran into an error while retrieving current user: %w", err) + } + + isOwner, err := isFileOwner(user, fileOwner) + if err != nil { + return false, fmt.Errorf("error while checking if current user is the file owner: %w", err) + } + + return isOwner, nil +} diff --git a/internal/pkg/agent/cmd/enroll_match_fileowner_windows_test.go b/internal/pkg/agent/cmd/enroll_match_fileowner_windows_test.go new file mode 100644 index 00000000000..6af8b984014 --- /dev/null +++ b/internal/pkg/agent/cmd/enroll_match_fileowner_windows_test.go @@ -0,0 +1,52 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +//go:build windows + +package cmd + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/sys/windows" +) + +func TestIsOwnerExecWindows(t *testing.T) { + path := t.TempDir() + fp := filepath.Join(path, "testfile") + fi, err := os.Create(fp) + require.NoError(t, err) + defer fi.Close() + + var token windows.Token + err = windows.OpenProcessToken(windows.CurrentProcess(), windows.TOKEN_QUERY, &token) + require.NoError(t, err) + defer token.Close() + + tokenUser, err := token.GetTokenUser() + require.NoError(t, err) + + err = windows.SetNamedSecurityInfo( + fp, + windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION, + tokenUser.User.Sid, + nil, + nil, + nil, + ) + require.NoError(t, err) + + // The file owner has been set to the current user's SID above, + // so isOwnerExec must now report that we are the owner. + + isOwner, err := isOwnerExec(fp) + require.NoError(t, err) + + require.True(t, isOwner, fmt.Sprintf("expected isOwnerExec to return \"true\", received \"%v\"", isOwner)) +} diff --git a/internal/pkg/composable/providers/kubernetes/config.go b/internal/pkg/composable/providers/kubernetes/config.go index c3fbc957c79..0e287fd2c3f 100644 --- a/internal/pkg/composable/providers/kubernetes/config.go +++ b/internal/pkg/composable/providers/kubernetes/config.go @@ -5,13 +5,18 @@ package kubernetes import ( + "errors" "time" "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/utils" ) + +const hintsInputsPathPattern = "/usr/share/elastic-agent/hints.inputs.d/*.yml" + // Config for kubernetes provider type Config struct { Scope string `config:"scope"` @@ -57,6 +62,22 @@ type Enabled struct { Enabled bool `config:"enabled"` } + +func GetHintsInputConfigPath(log *logger.Logger, agentCfg map[string]interface{}) string { + hintsVal, err := utils.GetNestedMap(agentCfg,
"providers", "kubernetes", "hints", "enabled") + if err != nil { + if errors.Is(err, utils.ErrKeyNotFound) { + return "" + } + log.Errorf("error at reading providers.kubernetes.hints.enabled from config: %v", err) + return "" + } + hintsEnabled, ok := hintsVal.(bool) + if !ok || !hintsEnabled { + return "" + } + return hintsInputsPathPattern +} + // InitDefaults initializes the default values for the config. func (c *Config) InitDefaults() { c.CleanupTimeout = 60 * time.Second diff --git a/internal/pkg/composable/providers/kubernetes/config_test.go b/internal/pkg/composable/providers/kubernetes/config_test.go new file mode 100644 index 00000000000..e840b9319cc --- /dev/null +++ b/internal/pkg/composable/providers/kubernetes/config_test.go @@ -0,0 +1,80 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +func TestGetHintsInputConfigPath(t *testing.T) { + + log, err := logger.New("loader_test", true) + require.NoError(t, err, "failed to create logger ", err) + + for _, tc := range []struct { + name string + cfg map[string]any + expectedPath string + }{ + { + name: "fully composite yaml key", + cfg: map[string]any{ + "providers.kubernetes.hints.enabled": true, + }, + expectedPath: hintsInputsPathPattern, + }, + { + name: "partially composite yaml key", + cfg: map[string]any{ + "providers.kubernetes": map[string]any{ + "hints.enabled": false, + }, + }, + expectedPath: "", + }, + { + name: "normal yaml key", + cfg: map[string]any{ + "providers": map[string]any{ + "kubernetes": map[string]any{ + "hints": map[string]any{ + "enabled": true, + }, + }, + }, + }, + expectedPath: hintsInputsPathPattern, + }, + { + name: "hints enabled no bool", + cfg: map[string]any{ + "providers": map[string]any{ + "kubernetes": map[string]any{ + "hints": map[string]any{ + "enabled": "true", + }, + }, + }, + }, + expectedPath: "", + }, + } { + t.Run(tc.name, func(t *testing.T) { + cfg, err := config.NewConfigFrom(tc.cfg) + require.NoError(t, err) + + mapCfg, err := cfg.ToMapStr() + require.NoError(t, err) + + require.Equal(t, tc.expectedPath, GetHintsInputConfigPath(log, mapCfg)) + }) + } + +} diff --git a/internal/pkg/config/loader.go b/internal/pkg/config/loader.go index 00b6552b1e2..8dd8e0b043a 100644 --- a/internal/pkg/config/loader.go +++ b/internal/pkg/config/loader.go @@ -7,6 +7,7 @@ package config import ( "fmt" "path/filepath" + "strings" "go.opentelemetry.io/collector/confmap" @@ -102,9 +103,11 @@ func getInput(c *Config) ([]*ucfg.Config, error) { return tmpConfig.Inputs, nil } +// isFileUnderInputsFolder checks if the given f path matches the Loader inputsFolder or +// if the 
parent directory of it has the suffix inputs.d func (l *Loader) isFileUnderInputsFolder(f string) bool { if matches, err := filepath.Match(l.inputsFolder, f); !matches || err != nil { - return false + return strings.HasSuffix(filepath.Dir(f), "inputs.d") } return true } diff --git a/internal/pkg/crypto/io_test.go b/internal/pkg/crypto/io_test.go index 76fd455f184..f8188c860da 100644 --- a/internal/pkg/crypto/io_test.go +++ b/internal/pkg/crypto/io_test.go @@ -15,7 +15,7 @@ import ( func TestIO(t *testing.T) { t.Run("encode and decode with the right password", func(t *testing.T) { - passwd := []byte("hello") + passwd := bytes.Repeat([]byte("hello"), 10) msg := []byte("bonjour la famille") dest := new(bytes.Buffer) @@ -40,7 +40,7 @@ func TestIO(t *testing.T) { }) t.Run("Large single write", func(t *testing.T) { - passwd := []byte("hello") + passwd := bytes.Repeat([]byte("hello"), 10) msg, err := randomBytes(1327) require.NoError(t, err) @@ -67,7 +67,7 @@ func TestIO(t *testing.T) { }) t.Run("try to decode with the wrong password", func(t *testing.T) { - passwd := []byte("hello") + passwd := bytes.Repeat([]byte("hello"), 10) msg := []byte("bonjour la famille") dest := new(bytes.Buffer) @@ -90,7 +90,7 @@ func TestIO(t *testing.T) { }) t.Run("Make sure that buffered IO works with the encoder", func(t *testing.T) { - passwd := []byte("hello") + passwd := bytes.Repeat([]byte("hello"), 10) msg, err := randomBytes(2048) require.NoError(t, err) dest := new(bytes.Buffer) @@ -121,7 +121,7 @@ func TestIO(t *testing.T) { }) t.Run("Make sure that buffered IO works with the decoder", func(t *testing.T) { - passwd := []byte("hello") + passwd := bytes.Repeat([]byte("hello"), 10) msg, err := randomBytes(2048) require.NoError(t, err) dest := new(bytes.Buffer) @@ -163,7 +163,7 @@ func TestIO(t *testing.T) { }) t.Run("works with multiple writes", func(t *testing.T) { - passwd := []byte("hello") + passwd := bytes.Repeat([]byte("hello"), 10) expected := []byte("hello world bonjour la 
famille") diff --git a/pkg/testing/tools/tools.go b/pkg/testing/tools/tools.go index 5bc9346842e..b36e9a4fc81 100644 --- a/pkg/testing/tools/tools.go +++ b/pkg/testing/tools/tools.go @@ -42,7 +42,8 @@ func InstallAgentWithPolicy(ctx context.Context, t *testing.T, installOpts atesting.InstallOpts, agentFixture *atesting.Fixture, kibClient *kibana.Client, - createPolicyReq kibana.AgentPolicy) (kibana.PolicyResponse, error) { + createPolicyReq kibana.AgentPolicy, +) (kibana.PolicyResponse, error) { t.Helper() // Create policy @@ -85,21 +86,41 @@ func InstallAgentForPolicy(ctx context.Context, t *testing.T, installOpts atesting.InstallOpts, agentFixture *atesting.Fixture, kibClient *kibana.Client, - policyID string) error { - t.Helper() + policyID string, +) error { + enrollmentToken, err := CreateEnrollmentToken(t, ctx, kibClient, policyID) + if err != nil { + return fmt.Errorf("failed to create enrollment token while preparing to install agent for policy: %w", err) + } + return InstallAgentForPolicyWithToken(ctx, t, installOpts, agentFixture, kibClient, policyID, enrollmentToken) +} +func CreateEnrollmentToken(t *testing.T, ctx context.Context, kibClient *kibana.Client, policyID string) (kibana.CreateEnrollmentAPIKeyResponse, error) { // Create enrollment API key createEnrollmentAPIKeyReq := kibana.CreateEnrollmentAPIKeyRequest{ PolicyID: policyID, } + t.Logf("Creating enrollment API key...") + enrollmentToken, err := kibClient.CreateEnrollmentAPIKey(ctx, createEnrollmentAPIKeyReq) + if err != nil { + return kibana.CreateEnrollmentAPIKeyResponse{}, fmt.Errorf("failed creating enrollment API key: %w", err) + } + + return enrollmentToken, nil +} + +func InstallAgentForPolicyWithToken(ctx context.Context, t *testing.T, + installOpts atesting.InstallOpts, + agentFixture *atesting.Fixture, + kibClient *kibana.Client, + policyID string, + enrollmentToken kibana.CreateEnrollmentAPIKeyResponse, +) error { + t.Helper() + if installOpts.EnrollmentToken == "" { t.Logf("Creating 
enrollment API key...") - enrollmentToken, err := kibClient.CreateEnrollmentAPIKey(ctx, createEnrollmentAPIKeyReq) - if err != nil { - return fmt.Errorf("failed creating enrollment API key: %w", err) - } - installOpts.EnrollmentToken = enrollmentToken.APIKey } @@ -138,5 +159,6 @@ func InstallAgentForPolicy(ctx context.Context, t *testing.T, 10*time.Second, "Elastic Agent status is not online", ) + return nil } diff --git a/testing/integration/enroll_unprivileged_test.go b/testing/integration/enroll_unprivileged_test.go new file mode 100644 index 00000000000..48bf6ab4f30 --- /dev/null +++ b/testing/integration/enroll_unprivileged_test.go @@ -0,0 +1,78 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build integration + +package integration + +import ( + "context" + "os" + "testing" + + "github.com/gofrs/uuid/v5" + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-libs/kibana" + "github.com/elastic/elastic-agent/internal/pkg/agent/cmd" + atesting "github.com/elastic/elastic-agent/pkg/testing" + "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools" + "github.com/elastic/elastic-agent/pkg/testing/tools/fleettools" +) + +func TestEnrollUnprivileged(t *testing.T) { + info := define.Require(t, define.Requirements{ + Group: Default, + Stack: &define.Stack{}, + Sudo: true, + }) + t.Run("unenrolled unprivileged agent re-enrolls successfully using root user", func(t *testing.T) { + ctx := context.Background() + fixture, err := define.NewFixtureFromLocalBuild(t, define.Version()) + require.NoError(t, err) + installOpts := atesting.InstallOpts{ + NonInteractive: true, + Force: true, + Privileged: false, + } + + randId := uuid.Must(uuid.NewV4()).String() + policyReq := kibana.AgentPolicy{ + 
Name: "test-policy-" + randId, + Namespace: "default", + Description: "Test policy " + randId, + MonitoringEnabled: []kibana.MonitoringEnabledOption{ + kibana.MonitoringEnabledLogs, + kibana.MonitoringEnabledMetrics, + }, + } + policy, err := info.KibanaClient.CreatePolicy(ctx, policyReq) + require.NoError(t, err) + + enrollmentApiKey, err := tools.CreateEnrollmentToken(t, ctx, info.KibanaClient, policy.ID) + require.NoError(t, err) + + err = tools.InstallAgentForPolicyWithToken(ctx, t, installOpts, fixture, info.KibanaClient, policy.ID, enrollmentApiKey) + require.NoError(t, err) + + hostname, err := os.Hostname() + require.NoError(t, err) + + agent, err := fleettools.GetAgentByPolicyIDAndHostnameFromList(ctx, info.KibanaClient, policy.ID, hostname) + require.NoError(t, err) + + _, err = info.KibanaClient.UnEnrollAgent(ctx, kibana.UnEnrollAgentRequest{ID: agent.ID}) + require.NoError(t, err) + + enrollUrl, err := fleettools.DefaultURL(ctx, info.KibanaClient) + require.NoError(t, err) + + enrollArgs := []string{"enroll", "--url", enrollUrl, "--enrollment-token", enrollmentApiKey.APIKey, "--force"} + + out, err := fixture.Exec(ctx, enrollArgs) + require.Error(t, err) + require.Contains(t, string(out), cmd.UserOwnerMismatchError.Error()) + }) +} diff --git a/testing/integration/testdata/.upgrade-test-agent-versions.yml b/testing/integration/testdata/.upgrade-test-agent-versions.yml index 8d77c207fd8..aedff1f9682 100644 --- a/testing/integration/testdata/.upgrade-test-agent-versions.yml +++ b/testing/integration/testdata/.upgrade-test-agent-versions.yml @@ -8,5 +8,5 @@ testVersions: - 8.18.0-SNAPSHOT - 8.17.1-SNAPSHOT - 8.17.0 - - 8.16.1 + - 8.16.2 - 7.17.27-SNAPSHOT