From 577ab59b21cda42220a9280be826dc430d9020a2 Mon Sep 17 00:00:00 2001
From: "elastic-vault-github-plugin-prod[bot]" <150874479+elastic-vault-github-plugin-prod[bot]@users.noreply.github.com>
Date: Thu, 19 Dec 2024 14:41:10 +0100
Subject: [PATCH] [automation] Publish kubernetes templates for elastic-agent
 (#6402)

Co-authored-by: elasticmachine
(cherry picked from commit a6e2cb113a62d792388900f11d9c1e612080f560)
---
 .../templates.d/activemq.yml            |   11 +-
 .../templates.d/apache.yml              |   92 +-
 .../templates.d/cassandra.yml           |    4 +
 .../templates.d/cef.yml                 |   46 +-
 .../templates.d/checkpoint.yml          |    4 +
 .../templates.d/crowdstrike.yml         |    8 +
 .../templates.d/cyberarkpas.yml         |    4 +
 .../templates.d/elasticsearch.yml       |   20 +
 .../templates.d/fireeye.yml             |    4 +
 .../templates.d/haproxy.yml             |    4 +
 .../templates.d/hashicorp_vault.yml     |    8 +
 .../templates.d/iis.yml                 |   62 +-
 .../templates.d/infoblox_nios.yml       |    4 +
 .../templates.d/iptables.yml            |   36 +-
 .../templates.d/kafka.yml               |   62 +-
 .../templates.d/kibana.yml              |   29 +
 .../templates.d/logstash.yml            |  358 ++++-
 .../templates.d/mattermost.yml          |    6 +-
 .../templates.d/microsoft_sqlserver.yml |   13 +-
 .../templates.d/mimecast.yml            | 1162 ++++++++++++++++-
 .../templates.d/modsecurity.yml         |    4 +
 .../templates.d/mongodb.yml             |    8 +-
 .../templates.d/mysql.yml               |   86 +-
 .../templates.d/nats.yml                |   50 +-
 .../templates.d/nginx.yml               |  108 +-
 .../templates.d/oracle.yml              |   54 +-
 .../templates.d/panw.yml                |   20 +-
 .../templates.d/postgresql.yml          |    4 +
 .../templates.d/prometheus.yml          |    4 +-
 .../templates.d/rabbitmq.yml            |    4 +
 .../templates.d/redis.yml               |   54 +-
 .../templates.d/santa.yml               |    4 +
 .../templates.d/sentinel_one.yml        |   44 +-
 .../templates.d/snort.yml               |    4 +
 .../templates.d/stan.yml                |    4 +
 .../templates.d/suricata.yml            |    4 +
 .../templates.d/symantec_endpoint.yml   |    4 +
 .../templates.d/synthetics.yml          |   78 +-
 .../templates.d/tomcat.yml              |   54 +-
 .../templates.d/traefik.yml             |    7 +-
 .../templates.d/udp.yml                 |   26 +-
 .../templates.d/zeek.yml                |  172 +++
 42 files changed, 2263 insertions(+), 471 deletions(-)

diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml
index 95d9566cf31..742df26feda 100644
--- a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml
@@ -10,6 +10,8 @@ inputs:
             type: logs
           exclude_files:
             - .gz$
+          file_identity:
+            fingerprint: null
           parsers:
             - container:
                 format: auto
@@ -18,6 +20,8 @@
             - /var/log/containers/*${kubernetes.hints.container_id}.log
           prospector:
             scanner:
+              fingerprint:
+                enabled: true
               symlinks: true
           tags:
             - forwarded
@@ -28,6 +32,8 @@ inputs:
             type: logs
           exclude_files:
             - .gz$
+          file_identity:
+            fingerprint: null
           multiline:
             match: after
             negate: true
@@ -40,6 +46,8 @@
             - /var/log/containers/*${kubernetes.hints.container_id}.log
           prospector:
             scanner:
+              fingerprint:
+                enabled: true
               symlinks: true
           tags:
             - forwarded
@@ -62,7 +70,6 @@
           path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false
           period: ${kubernetes.hints.activemq.broker.period|kubernetes.hints.activemq.period|'10s'}
           tags:
-            - forwarded
             - activemq-broker
           username: ${kubernetes.hints.activemq.broker.username|kubernetes.hints.activemq.username|'admin'}
         - condition: ${kubernetes.hints.activemq.queue.enabled} == true or ${kubernetes.hints.activemq.enabled} == true
@@ -77,7 +84,6 @@
           path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false
           period: 
${kubernetes.hints.activemq.queue.period|kubernetes.hints.activemq.period|'10s'} tags: - - forwarded - activemq-queue username: ${kubernetes.hints.activemq.queue.username|kubernetes.hints.activemq.username|'admin'} - condition: ${kubernetes.hints.activemq.topic.enabled} == true or ${kubernetes.hints.activemq.enabled} == true @@ -92,7 +98,6 @@ inputs: path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false period: ${kubernetes.hints.activemq.topic.period|kubernetes.hints.activemq.period|'10s'} tags: - - forwarded - activemq-topic username: ${kubernetes.hints.activemq.topic.username|kubernetes.hints.activemq.username|'admin'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml index e1586d3c5ea..3520dca77fc 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml @@ -1,46 +1,4 @@ inputs: - - name: filestream-apache - id: filestream-apache-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.apache.access.enabled} == true or ${kubernetes.hints.apache.enabled} == true - data_stream: - dataset: apache.access - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.apache.access.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - apache-access - - condition: ${kubernetes.hints.apache.error.enabled} == true or ${kubernetes.hints.apache.enabled} == true - data_stream: - dataset: apache.error - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.apache.error.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - apache-error - data_stream.namespace: default - name: httpjson-apache id: httpjson-apache-${kubernetes.hints.container_id} type: httpjson @@ -139,3 +97,53 @@ inputs: period: ${kubernetes.hints.apache.status.period|kubernetes.hints.apache.period|'30s'} server_status_path: /server-status data_stream.namespace: default + - name: filestream-apache + id: filestream-apache-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.apache.access.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.access + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - apache-access + - condition: ${kubernetes.hints.apache.error.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.error + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - apache-error + data_stream.namespace: default diff --git 
a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml index 1d6e88f57a6..5d15a8122ea 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -22,6 +24,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml index 659dd1ec979..e4c87ed361e 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml @@ -1,25 +1,4 @@ inputs: - - name: udp-cef - id: udp-cef-${kubernetes.hints.container_id} - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true - data_stream: - dataset: cef.log - type: logs - host: localhost:9003 - processors: - - rename: - fields: - - from: message - to: event.original - - decode_cef: - field: event.original - tags: - - cef - - forwarded - data_stream.namespace: default - name: tcp-cef id: tcp-cef-${kubernetes.hints.container_id} type: tcp @@ -52,6 +31,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -67,8 +48,31 @@ inputs: field: event.original prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - cef - forwarded data_stream.namespace: default + - name: udp-cef + id: udp-cef-${kubernetes.hints.container_id} + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true + data_stream: + dataset: cef.log + type: logs + host: localhost:9003 + processors: + - rename: + fields: + - from: message + to: event.original + - decode_cef: + field: event.original + tags: + - cef + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml index 97bdf20b5a4..155b98f8699 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -30,6 +32,8 @@ inputs: target: _temp_ prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml index 760582f2305..1f1319c5ce7 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline.match: after multiline.max_lines: 5000 multiline.negate: true @@ -23,6 +25,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - 
forwarded @@ -33,6 +37,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -43,6 +49,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml index 80cdc1bb0fa..a9adaaf36a1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml @@ -44,6 +44,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -54,6 +56,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml index a5d43104711..bc00a23cc47 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -43,6 +45,8 @@ inputs: ignore_missing: true prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.elasticsearch.deprecation.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true data_stream: @@ -52,6 +56,8 @@ inputs: - .gz$ - _slowlog.log$ - _access.log$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -60,6 +66,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.elasticsearch.gc.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true data_stream: @@ -72,6 +80,8 @@ inputs: - '^CommandLine flags: ' - '^Memory: ' - ^{ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -89,6 +99,8 @@ inputs: target: "" prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.elasticsearch.server.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true data_stream: @@ -99,6 +111,8 @@ inputs: - _slowlog.log$ - _access.log$ - _deprecation.log$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -107,6 +121,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.elasticsearch.slowlog.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true data_stream: @@ -114,6 +130,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -122,6 +140,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true data_stream.namespace: default - name: elasticsearch/metrics-elasticsearch diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml index 44bd23b95d6..5ce213a2ff1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -20,6 +22,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - fireeye-nx diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml index 2a69222df44..be3b22b57ac 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -20,6 +22,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - haproxy-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml index b98a748f878..cc2249d1b6e 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - hashicorp-vault-audit @@ -27,6 +31,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -35,6 +41,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - hashicorp-vault-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml index 8f35f1980e2..7708e49d3e5 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml @@ -1,4 +1,31 @@ inputs: + - name: iis/metrics-iis + id: iis/metrics-iis-${kubernetes.hints.container_id} + type: iis/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.iis.application_pool.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.application_pool + type: metrics + metricsets: + - application_pool + period: ${kubernetes.hints.iis.application_pool.period|kubernetes.hints.iis.period|'10s'} + - condition: ${kubernetes.hints.iis.webserver.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.webserver + type: metrics + metricsets: + - webserver + period: ${kubernetes.hints.iis.webserver.period|kubernetes.hints.iis.period|'10s'} + - condition: ${kubernetes.hints.iis.website.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.website + type: metrics + metricsets: + - website + period: ${kubernetes.hints.iis.website.period|kubernetes.hints.iis.period|'10s'} + data_stream.namespace: default - name: filestream-iis id: filestream-iis-${kubernetes.hints.container_id} type: filestream @@ -12,6 +39,8 @@ inputs: - .gz$ exclude_lines: - ^# + file_identity: + fingerprint: null ignore_older: 72h parsers: - container: @@ -21,6 +50,8 @@ inputs: - 
/var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - iis-access @@ -32,6 +63,8 @@ inputs: - .gz$ exclude_lines: - ^# + file_identity: + fingerprint: null ignore_older: 72h parsers: - container: @@ -41,34 +74,9 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - iis-error data_stream.namespace: default - - name: iis/metrics-iis - id: iis/metrics-iis-${kubernetes.hints.container_id} - type: iis/metrics - use_output: default - streams: - - condition: ${kubernetes.hints.iis.application_pool.enabled} == true or ${kubernetes.hints.iis.enabled} == true - data_stream: - dataset: iis.application_pool - type: metrics - metricsets: - - application_pool - period: ${kubernetes.hints.iis.application_pool.period|kubernetes.hints.iis.period|'10s'} - - condition: ${kubernetes.hints.iis.webserver.enabled} == true or ${kubernetes.hints.iis.enabled} == true - data_stream: - dataset: iis.webserver - type: metrics - metricsets: - - webserver - period: ${kubernetes.hints.iis.webserver.period|kubernetes.hints.iis.period|'10s'} - - condition: ${kubernetes.hints.iis.website.enabled} == true or ${kubernetes.hints.iis.enabled} == true - data_stream: - dataset: iis.website - type: metrics - metricsets: - - website - period: ${kubernetes.hints.iis.website.period|kubernetes.hints.iis.period|'10s'} - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml index 413683e2d18..98c63da565a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml @@ -14,6 +14,8 @@ inputs: _conf: tz_offset: local fields_under_root: true + file_identity: + fingerprint: null parsers: - container: format: auto @@ -24,6 +26,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml index 9ccbf653368..4455b0bcb22 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml @@ -1,4 +1,20 @@ inputs: + - name: udp-iptables + id: udp-iptables-${kubernetes.hints.container_id} + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - iptables-log + - forwarded + data_stream.namespace: default - name: filestream-iptables id: filestream-iptables-${kubernetes.hints.container_id} type: filestream @@ -10,6 +26,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -20,6 +38,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - iptables-log @@ -41,19 +61,3 @@ inputs: tags: - iptables-log data_stream.namespace: default - - name: udp-iptables - id: udp-iptables-${kubernetes.hints.container_id} - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.iptables.log.enabled} == true or 
${kubernetes.hints.iptables.enabled} == true - data_stream: - dataset: iptables.log - type: logs - host: localhost:9001 - processors: - - add_locale: null - tags: - - iptables-log - - forwarded - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml index a167b6e182f..3532eba99f9 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml @@ -1,4 +1,37 @@ inputs: + - name: filestream-kafka + id: filestream-kafka-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.log + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + multiline: + match: after + negate: true + pattern: ^\[ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kafka.log.stream|'all'} + paths: + - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - kafka-log + data_stream.namespace: default - name: kafka/metrics-kafka id: kafka/metrics-kafka-${kubernetes.hints.container_id} type: kafka/metrics @@ -36,32 +69,3 @@ inputs: period: ${kubernetes.hints.kafka.partition.period|kubernetes.hints.kafka.period|'10s'} username: ${kubernetes.hints.kafka.partition.username|kubernetes.hints.kafka.username|''} data_stream.namespace: default - - name: filestream-kafka - id: filestream-kafka-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true - data_stream: - dataset: kafka.log - type: logs - exclude_files: - - .gz$ - multiline: - match: after - negate: true - pattern: ^\[ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.kafka.log.stream|'all'} - paths: - - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - kafka-log - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml index 499a6e9d659..bf5e5e33465 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.kibana.log.enabled} == true or ${kubernetes.hints.kibana.enabled} == true data_stream: @@ -25,6 +29,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -33,6 +39,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true data_stream.namespace: default - name: http/metrics-kibana @@ -61,6 +69,27 @@ inputs: to: kibana.background_task_utilization ignore_missing: true username: 
${kubernetes.hints.kibana.background_task_utilization.username|kubernetes.hints.kibana.username|''} + - condition: ${kubernetes.hints.kibana.task_manager_metrics.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.task_manager_metrics + type: metrics + hosts: + - ${kubernetes.hints.kibana.task_manager_metrics.host|kubernetes.hints.kibana.host|'http://localhost:5601'} + method: GET + metricsets: + - json + namespace: task_manager_metrics + password: ${kubernetes.hints.kibana.task_manager_metrics.password|kubernetes.hints.kibana.password|''} + path: /api/task_manager/metrics + period: ${kubernetes.hints.kibana.task_manager_metrics.period|kubernetes.hints.kibana.period|'10s'} + processors: + - rename: + fail_on_error: false + fields: + - from: http.task_manager_metrics + to: kibana.task_manager_metrics + ignore_missing: true + username: ${kubernetes.hints.kibana.task_manager_metrics.username|kubernetes.hints.kibana.username|''} data_stream.namespace: default - name: kibana/metrics-kibana id: kibana/metrics-kibana-${kubernetes.hints.container_id} diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml index 89c3aed53ca..7b889c42cf4 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml @@ -1,32 +1,4 @@ inputs: - - name: logstash/metrics-logstash - id: logstash/metrics-logstash-${kubernetes.hints.container_id} - type: logstash/metrics - use_output: default - streams: - - condition: ${kubernetes.hints.logstash.node.enabled} == true or ${kubernetes.hints.logstash.enabled} == true - data_stream: - dataset: logstash.stack_monitoring.node - type: metrics - hosts: - - ${kubernetes.hints.logstash.node.host|kubernetes.hints.logstash.host|'http://localhost:9600'} - metricsets: - - node - password: ${kubernetes.hints.logstash.node.password|kubernetes.hints.logstash.password|''} - period: ${kubernetes.hints.logstash.node.period|kubernetes.hints.logstash.period|'10s'} - username: ${kubernetes.hints.logstash.node.username|kubernetes.hints.logstash.username|''} - - condition: ${kubernetes.hints.logstash.node_stats.enabled} == true or ${kubernetes.hints.logstash.enabled} == true - data_stream: - dataset: logstash.stack_monitoring.node_stats - type: metrics - hosts: - - ${kubernetes.hints.logstash.node_stats.host|kubernetes.hints.logstash.host|'http://localhost:9600'} - metricsets: - - node_stats - password: ${kubernetes.hints.logstash.node_stats.password|kubernetes.hints.logstash.password|''} - period: ${kubernetes.hints.logstash.node_stats.period|kubernetes.hints.logstash.period|'10s'} - username: ${kubernetes.hints.logstash.node_stats.username|kubernetes.hints.logstash.username|''} - data_stream.namespace: default - name: filestream-logstash id: filestream-logstash-${kubernetes.hints.container_id} type: filestream @@ -38,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -56,6 +30,8 @@ inputs: target: "" prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.logstash.slowlog.enabled} == true or ${kubernetes.hints.logstash.enabled} == true data_stream: @@ -63,6 +39,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -77,5 +55,329 @@ inputs: target: "" prospector: scanner: + fingerprint: 
+ enabled: true symlinks: true data_stream.namespace: default + - name: logstash/metrics-logstash + id: logstash/metrics-logstash-${kubernetes.hints.container_id} + type: logstash/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.logstash.node.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.stack_monitoring.node + type: metrics + hosts: + - ${kubernetes.hints.logstash.node.host|kubernetes.hints.logstash.host|'http://localhost:9600'} + metricsets: + - node + password: ${kubernetes.hints.logstash.node.password|kubernetes.hints.logstash.password|''} + period: ${kubernetes.hints.logstash.node.period|kubernetes.hints.logstash.period|'10s'} + username: ${kubernetes.hints.logstash.node.username|kubernetes.hints.logstash.username|''} + - condition: ${kubernetes.hints.logstash.node_stats.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.stack_monitoring.node_stats + type: metrics + hosts: + - ${kubernetes.hints.logstash.node_stats.host|kubernetes.hints.logstash.host|'http://localhost:9600'} + metricsets: + - node_stats + password: ${kubernetes.hints.logstash.node_stats.password|kubernetes.hints.logstash.password|''} + period: ${kubernetes.hints.logstash.node_stats.period|kubernetes.hints.logstash.period|'10s'} + username: ${kubernetes.hints.logstash.node_stats.username|kubernetes.hints.logstash.username|''} + data_stream.namespace: default + - name: cel-logstash + id: cel-logstash-${kubernetes.hints.container_id} + type: cel + use_output: default + streams: + - auth.basic.password: null + auth.basic.user: null + condition: ${kubernetes.hints.logstash.node_cel.enabled} == true and ${kubernetes.hints.logstash.enabled} == true + config_version: "2" + data_stream: + dataset: logstash.node + type: metrics + interval: ${kubernetes.hints.logstash.node_cel.period|kubernetes.hints.logstash.period|'30s'} + program: "get(state.url)\n.as(resp, bytes(resp.Body)\n.decode_json().as(body,\n {\n \"logstash\":{\n \"elasticsearch\": has(body.pipelines) \n ? {\n \"cluster\":{\n \"id\":body.pipelines.map(pipeline_name, pipeline_name != \".monitoring-logstash\", has(body.pipelines[pipeline_name].vertices)\n ? body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.cluster_uuid), vertex.cluster_uuid) \n : []).flatten(),\n }\n }\n : {},\n \"node\":{\n \"stats\":{\n \"events\":body.events,\n \"jvm\":{\n \"uptime_in_millis\":body.jvm.uptime_in_millis,\n \"mem\":[body.jvm['mem']].drop(\"pools\")[0],\n \"threads\":body.jvm.threads\n },\n \"queue\":body.queue,\n \"reloads\":body.reloads,\n \"process\":body.process,\n \"os\":{\n \"cpu\":body.process.cpu,\n \"cgroup\":has(body.os.group) ? 
body.os.cgroup : {},\n },\n \"logstash\":{\n \"ephemeral_id\":body.ephemeral_id,\n \"host\":body.host,\n \"http_address\":body.http_address,\n \"name\":body.name,\n \"pipeline\":body.pipeline,\n \"pipelines\":body.pipelines.map(pipeline, pipeline != '.monitoring-logstash', [pipeline]).flatten(),\n \"snapshot\":body.snapshot,\n \"status\":body.status,\n \"uuid\":body.id,\n \"version\":body.version,\n }\n }}\n }})\n)\n.as(eve, {\n \"events\":[eve]\n})" + redact: + fields: null + resource.url: http://localhost:9600/_node/stats?graph=true&vertices=true + - auth.basic.password: null + auth.basic.user: null + condition: ${kubernetes.hints.logstash.pipeline.enabled} == true and ${kubernetes.hints.logstash.enabled} == true + config_version: "2" + data_stream: + dataset: logstash.pipeline + type: metrics + interval: ${kubernetes.hints.logstash.pipeline.period|kubernetes.hints.logstash.period|'30s'} + program: | + get(state.url).as(resp, bytes(resp.Body).decode_json().as(body, + body.pipelines.map(pipeline_name, pipeline_name != ".monitoring-logstash", { + "name": pipeline_name, + "elasticsearch.cluster.id": has(body.pipelines[pipeline_name].vertices) ? + body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.cluster_uuid), vertex.cluster_uuid) + : + [], + "host":{ + "name":body.name, + "address":body.http_address, + }, + "total":{ + "flow":body.pipelines[pipeline_name].flow, + "time":{ + "queue_push_duration": { + "ms": has(body.pipelines[pipeline_name].events.queue_push_duration_in_millis) ? + body.pipelines[pipeline_name].events.queue_push_duration_in_millis + : + [], + }, + "duration":{ + "ms": has(body.pipelines[pipeline_name].events.duration_in_millis) ? + body.pipelines[pipeline_name].events.duration_in_millis + : + [], + }, + }, + "reloads":{ + "successes":body.pipelines[pipeline_name].reloads.successes, + "failures":body.pipelines[pipeline_name].reloads.failures + }, + "events":{ + "out": has(body.pipelines[pipeline_name].events.out) ? + body.pipelines[pipeline_name].events.out + : + [], + "in": has(body.pipelines[pipeline_name].events.out) ? // This deliberately uses 'out' as `has` does not accept `in` + body.pipelines[pipeline_name].events['in'] + : + [], + "filtered": has(body.pipelines[pipeline_name].events.filtered) ? + body.pipelines[pipeline_name].events.filtered + : + [], + }, + "queues":{ + "type": has(body.pipelines[pipeline_name].queue.type) ? + body.pipelines[pipeline_name].queue.type + : + [], + "events": has(body.pipelines[pipeline_name].queue.events_count) ? + body.pipelines[pipeline_name].queue.events_count + : + [], + "current_size":{ + "bytes": has(body.pipelines[pipeline_name].queue.queue_size_in_bytes) ? + body.pipelines[pipeline_name].queue.queue_size_in_bytes + : + [], + }, + "max_size":{ + "bytes": has(body.pipelines[pipeline_name].queue.max_queue_size_in_bytes) ? 
+ body.pipelines[pipeline_name].queue.max_queue_size_in_bytes + : + [], + } + } + } + }))).as(pipelines, { + "events": pipelines.map(pipeline, { + "logstash": {"pipeline":pipeline} + }) + }) + redact: + fields: null + resource.url: http://localhost:9600/_node/stats?graph=true&vertices=true + - auth.basic.password: null + auth.basic.user: null + condition: ${kubernetes.hints.logstash.plugins.enabled} == true and ${kubernetes.hints.logstash.enabled} == true + config_version: "2" + data_stream: + dataset: logstash.plugins + type: metrics + interval: ${kubernetes.hints.logstash.plugins.period|kubernetes.hints.logstash.period|'1m'} + program: | + get(state.url + "/stats?graph=true&vertices=true").as(resp, bytes(resp.Body).decode_json().as(body, + body.pipelines.map(pipeline_name, pipeline_name != ".monitoring-logstash", body.pipelines[pipeline_name].with({ + "name":pipeline_name, + "pipeline_source_map": + get(state.url + "/pipelines/" + pipeline_name + "?graph=true&vertices=true").as(resp, + bytes(resp.Body).decode_json().as(pipes, + has(pipes.pipeline) ? + pipes.pipelines.map(pipeline_name, + has(pipes.pipelines) && has(pipes.pipelines[pipeline_name].graph) && pipes.pipelines != null && pipes.pipelines[pipeline_name].graph.graph.vertices != null, + pipes.pipelines[pipeline_name].graph.graph.vertices.map(vertex, vertex.type == "plugin", { + "plugin_id": vertex.id, + "source": vertex.meta.source, + }) + ).drop("graph").flatten() + : + [] + ) + ), + "es_cluster_id": has(body.pipelines[pipeline_name].vertices) ? + body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.cluster_uuid), vertex.cluster_uuid) + : + [], + "es_cluster_id_map": has(body.pipelines[pipeline_name].vertices) ? + body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.cluster_uuid), { + "plugin_id": vertex.id, + "cluster_id": vertex.cluster_uuid, + }) + : + [], + "counter_map": has(body.pipelines[pipeline_name].vertices) ? + body.pipelines[pipeline_name].vertices.map(vertex, has(vertex.long_counters), vertex.long_counters.map(counter, { + "plugin_id": vertex.id, + "name": counter.name, + "value": counter.value + })) + : + [], + "outputs": body.pipelines[pipeline_name].plugins.outputs, + "inputs": body.pipelines[pipeline_name].plugins.inputs, + "filters": body.pipelines[pipeline_name].plugins.filters, + "codecs": body.pipelines[pipeline_name].plugins.codecs, + "host":{ + "name": body.name, + "address": body.http_address, + } + })))).as(events, events.map(event, { + "inputs": event.inputs.map(input, has(event.hash), { + "name": event.name, + "id": event.hash, + "host": event.host, + "elasticsearch.cluster.id": event.es_cluster_id, + "plugin": { + "type": "input", + "input": { + "source":event.pipeline_source_map.map(tuple, (tuple.plugin_id == input.id), tuple.source).flatten().as(source, (source.size() != 0) ? source[0] : ""), + "elasticsearch.cluster.id": event.es_cluster_id_map.map(tuple, tuple.plugin_id == input.id, tuple.cluster_id), + "metrics": { + input.name: event.counter_map.flatten().filter(tuple, tuple.plugin_id == input.id).as(counter_map, zip( + counter_map.map(tuple, tuple.name), + counter_map.map(tuple, tuple.value) + )) + }, + "name": input.name, + "id": input.id, + "flow": has(input.flow) ? 
+ input.flow + : + {}, + "events": { + "out": input.events.out, + }, + "time": { + "queue_push_duration": { + "ms": input.events.queue_push_duration_in_millis + } + } + } + } + }.drop_empty()), + "codecs": event.codecs.map(codec, has(event.hash), { + "name": event.name, + "id": event.hash, + "host": event.host, + "elasticsearch.cluster.id": event.es_cluster_id, + "plugin": { + "type": "codec", + "codec": { + "id":codec.id, + "name":codec.name, + "flow": has(codec.flow) ? codec.flow : {}, + "decode":{ + "duration":{ + "ms":codec.decode.duration_in_millis + }, + "in":codec.decode.writes_in, + "out":codec.decode.out, + }, + "encode":{ + "in":codec.encode.writes_in, + "duration":{ + "ms":codec.encode.duration_in_millis + } + } + } + } + }.drop_empty()), + "filters": event.filters.map(filter, has(event.hash), { + "name": event.name, + "id": event.hash, + "host": event.host, + "elasticsearch.cluster.id": event.es_cluster_id, + "plugin": { + "type": "filter", + "filter": { + "source":event.pipeline_source_map.map(tuple, (tuple.plugin_id == filter.id), tuple.source).flatten().as(source, (source.size() != 0) ? source[0] : ""), + "id": filter.id, + "name": filter.name, + "elasticsearch.cluster.id": event.es_cluster_id_map.map(tuple, tuple.plugin_id == filter.id, tuple.cluster_id), + "metrics": { + filter.name: event.counter_map.flatten().filter(tuple, tuple.plugin_id == filter.id).as(counter_map, zip( + counter_map.map(tuple, tuple.name), + counter_map.map(tuple, tuple.value) + )) + }, + "flow": has(filter.flow) ? + filter.flow + : + {}, + "events": { + "in": filter.events['in'], + "out": filter.events.out, + }, + "time": { + "duration": { + "ms": filter.events.duration_in_millis + } + } + } + } + }.drop_empty()), + "outputs": event.outputs.map(output, has(event.hash), { + "name": event.name, + "id": event.hash, + "host": event.host, + "elasticsearch.cluster.id": event.es_cluster_id, + "plugin": { + "type": "output", + "output": { + "id": output.id, + "name": output.name, + "source":event.pipeline_source_map.map(tuple, (tuple.plugin_id == output.id), tuple.source).flatten().as(source, (source.size() != 0) ? source[0] : ""), + "elasticsearch.cluster.id": event.es_cluster_id_map.map(tuple, tuple.plugin_id == output.id, tuple.cluster_id), + "metrics": { + output.name: event.counter_map.flatten().filter(tuple, tuple.plugin_id == output.id).as(counter_map, zip( + counter_map.map(tuple, tuple.name), + counter_map.map(tuple, tuple.value) + )) + }, + "flow": has(output.flow) ? 
+ output.flow + : + {}, + "events":{ + "in":output.events['in'], + "out":output.events.out, + }, + "time":{ + "duration":{ + "ms":output.events.duration_in_millis + } + } + } + } + }.drop_empty()) + }).collate(["filters", "outputs", "inputs", "codecs"])).as(plugins, { + "events": plugins.map(plugin, { + "logstash":{"pipeline":plugin} + }) + }) + redact: + fields: null + resource.url: http://localhost:9600/_node + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml index 61235d40965..10b05293429 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml @@ -9,7 +9,9 @@ inputs: dataset: mattermost.audit type: logs exclude_files: - - .gz$ + - \.gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - mattermost-audit diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml index 5c9eb7fddc2..0a5ab1ef6cd 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml @@ -23,6 +23,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -35,6 +37,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - mssql-logs @@ -99,6 +103,7 @@ inputs: dataset: microsoft_sqlserver.transaction_log type: metrics driver: mssql + fetch_from_all_databases: false hosts: - sqlserver://${kubernetes.hints.microsoft_sqlserver.transaction_log.username|kubernetes.hints.microsoft_sqlserver.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.transaction_log.password|kubernetes.hints.microsoft_sqlserver.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.transaction_log.host|kubernetes.hints.microsoft_sqlserver.host|'localhost'} metricsets: @@ -110,24 +115,24 @@ inputs: response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_mb, l.active_log_size_mb,l.log_backup_time,l.log_since_last_log_backup_mb,l.log_since_last_checkpoint_mb,l.log_recovery_size_mb from sys.dm_db_log_stats(DB_ID('master')) l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('master') ; response_format: table - - query: USE [master] ; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('master') ; + - query: USE [master]; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, 
l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('master') ; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', database_id FROM sys.databases WHERE name='model'; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_mb, l.active_log_size_mb,l.log_backup_time,l.log_since_last_log_backup_mb,l.log_since_last_checkpoint_mb,l.log_recovery_size_mb from sys.dm_db_log_stats(DB_ID('model')) l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('model') ; response_format: table - - query: USE [model] ; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('model') ; + - query: USE [model]; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('model') ; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', database_id FROM sys.databases WHERE name='tempdb'; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_mb, l.active_log_size_mb,l.log_backup_time,l.log_since_last_log_backup_mb,l.log_since_last_checkpoint_mb,l.log_recovery_size_mb from sys.dm_db_log_stats(DB_ID('tempdb')) l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('tempdb') ; response_format: table - - query: USE [tempdb] ; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('tempdb') ; + - query: USE [tempdb]; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('tempdb') ; response_format: table - query: SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', database_id FROM sys.databases WHERE name='msdb'; response_format: table - query: SELECT @@servername AS server_name, 
@@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_mb, l.active_log_size_mb,l.log_backup_time,l.log_since_last_log_backup_mb,l.log_since_last_checkpoint_mb,l.log_recovery_size_mb from sys.dm_db_log_stats(DB_ID('msdb')) l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('msdb') ; response_format: table - - query: USE [msdb] ; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('msdb') ; + - query: USE [msdb]; SELECT @@servername AS server_name, @@servicename AS instance_name, name As 'database_name', l.database_id, l.total_log_size_in_bytes As total_log_size_bytes, l.used_log_space_in_bytes As used_log_space_bytes, l.used_log_space_in_percent As used_log_space_pct, l.log_space_in_bytes_since_last_backup from sys.dm_db_log_space_usage l INNER JOIN sys.databases s ON l.database_id = s.database_id WHERE s.database_id = DB_ID('msdb') ; response_format: table data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml index c7a82f2812c..1e029fbba34 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml @@ -1,9 +1,1148 @@ inputs: + - name: cel-mimecast + id: cel-mimecast-${kubernetes.hints.container_id} + type: cel + use_output: default + streams: + - condition: ${kubernetes.hints.mimecast.archive_search_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.archive_search_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? 
token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? 
+ string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: logs + end_field: end + look_back: 24h + page_size: 100 + path: /api/archive/get-archive-search-logs + start_field: start + time_field: createTime + tags: + - forwarded + - mimecast-archive-search-logs + - condition: ${kubernetes.hints.mimecast.audit_events.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.audit_events + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "state.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token:\"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? token :\n {\n \"data\": state.?last_page.data.orValue([{\n state.start_field: state.?cursor.last.orValue(now - duration(state.look_back)).format(time_layout.RFC3339),\n state.end_field: now.format(time_layout.RFC3339),\n }]),\n }.as(req,\n post_request(state.url.trim_right(\"/\") + state.path, \"application/json\", \n {\n \"meta\": {\n \"pagination\": {\n \"pageSize\": state.page_size,\n ?\"pageToken\": state.?last_page.next,\n }\n },\n \"data\": req.data,\n }.encode_json()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ?\n {\n \"events\": body.data.map(e, {\"message\": e.encode_json()}),\n \"cursor\": {\n \"last\": ([now] + body.data.map(e, e[state.time_field].parse_time([\"2006-01-02T15:04:05-0700\", time_layout.RFC3339]))).max(),\n },\n ?\"last_page\": has(body.?meta.pagination.next) && size(body.data) != 0 ?\n optional.of({\n ?\"next\": body.?meta.pagination.next,\n \"data\": req.data,\n })\n :\n optional.none(),\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": has(body.?meta.pagination.next) && size(body.data) != 0,\n }\n :\n // Mimecast can return failure states with a 200. This\n // is detected by a non-empty fail array at the root\n // of the response body. 
Don't attempt to parse this\n // out, just dump the whole body into the error message.\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \":\" + string(resp.Body), // We know this is not empty.\n },\n },\n \"want_more\": false,\n }\n )\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \":\" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + end_field: endDateTime + look_back: 24h + page_size: 100 + path: /api/audit/get-audit-events + start_field: startDateTime + time_field: eventTime + tags: + - forwarded + - mimecast-audit-events + - condition: ${kubernetes.hints.mimecast.cloud_integrated_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.cloud_integrated_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "// This program is shared between cloud_integrated_logs and siem_logs\n// If it is changed here changes should be reflected in the other data\n// streams. Do not differentiate the logic between these data streams\n// lightly; use the state variable for this unless absolutely required.\nstate.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token: \"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? 
token :\n state.?cursor.work_list.orValue(state.types.map(t, {\"type\": t})).as(work_list, size(work_list) == 0 ?\n state.types.map(t, {\"type\": t})\n :\n work_list\n ).as(work_list,\n get_request(\n state.url.trim_right(\"/\") + state.path + \"?\" + {\n \"type\": [work_list[0].type],\n ?\"nextPage\": work_list[0].?next.optMap(next, [next]),\n ?\"dateRangeStartsAt\": state.?start.optMap(start, [start.format(\"2006-01-02\")]),\n ?\"dateRangeEndsAt\": state.?end.optMap(end, [end.format(\"2006-01-02\")]),\n ?\"pageSize\": state.?page_size.optMap(size, [string(int(size))]),\n }.format_query()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body,\n {\n \"events\": body.value.map(b, has(b.url),\n get(b.url).as(batch, batch.StatusCode == 200 ?\n bytes(batch.Body).mime(\"application/gzip\").mime(\"application/x-ndjson\").map(e,\n {\n \"message\": dyn(e.encode_json()),\n }\n )\n :\n [{\n \"error\": {\n \"code\": string(batch.StatusCode),\n \"id\": string(batch.Status),\n \"message\": \"GET \" + b.url + \": \" + (\n size(batch.Body) != 0 ?\n string(batch.Body)\n :\n string(batch.Status) + ' (' + string(batch.StatusCode) + ')'\n ),\n },\n }]\n )\n ).flatten(),\n \"cursor\": {\n \"work_list\": (\n \"@nextPage\" in body && size(body.value) != 0 ?\n [work_list[0].with({\"next\": body[\"@nextPage\"]})]\n :\n []\n ) + tail(work_list),\n },\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": \"@nextPage\" in body && size(body.value) != 0,\n }.as(to_publish, to_publish.with({\n \"want_more\": to_publish.want_more || size(to_publish.cursor.work_list) != 0,\n }))\n ).as(state, \n // Check whether we still need to get more, but have\n // no event for this type. If we do, populate events\n // with a place-holder to be discarded by the ingest\n // pipeline.\n state.want_more && size(state.events) == 0 ?\n state.with({\"events\": [{\"message\": \"want_more\"}]})\n :\n state\n )\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"GET \" + state.path + \": \" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + end_field: dateRangeEndsAt + look_back: 24h + page_size: null + path: /siem/v1/batch/events/ci + start_field: dateRangeStartsAt + types: + - entities + - mailflow + - urlclick + tags: + - forwarded + - mimecast-cloud-integrated-logs + - condition: ${kubernetes.hints.mimecast.dlp_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.dlp_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. 
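+      // Overall flow, as implemented below: (1) reuse state.token while
+      // now() < token.expires, otherwise obtain a client_credentials token
+      // from /oauth/token and keep it with a 60s grace period taken off
+      // expires_in; (2) build the request window from cursor.last, or from
+      // now-look_back on the first run, unless a page run is being resumed
+      // from last_page.data; (3) POST to state.path and page through the
+      // results with meta.pagination while a next pageToken is returned and
+      // data is non-empty; (4) treat a 200 response that carries a non-empty
+      // fail array as an error.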
+ state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? 
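+                // Include the response body when it is non-empty; otherwise
+                // fall back to the status text and code.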
+ string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: dlpLogs + end_field: to + look_back: 24h + page_size: 100 + path: /api/dlp/get-logs + start_field: from + time_field: eventTime + tags: + - forwarded + - mimecast-dlp-logs + - condition: ${kubernetes.hints.mimecast.message_release_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.message_release_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? 
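+                // Keep the original request window together with the
+                // server-issued pageToken so the next evaluation resumes
+                // this page run where it stopped.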
+ optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? + string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: heldReleaseLogs + end_field: end + look_back: 24h + page_size: 100 + path: /api/gateway/get-held-release-logs + start_field: start + time_field: released + tags: + - forwarded + - mimecast-message-release-logs + - condition: ${kubernetes.hints.mimecast.siem_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.siem_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "// This program is shared between cloud_integrated_logs and siem_logs\n// If it is changed here changes should be reflected in the other data\n// streams. Do not differentiate the logic between these data streams\n// lightly; use the state variable for this unless absolutely required.\nstate.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token: \"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? 
token :\n state.?cursor.work_list.orValue(state.types.map(t, {\"type\": t})).as(work_list, size(work_list) == 0 ?\n state.types.map(t, {\"type\": t})\n :\n work_list\n ).as(work_list,\n get_request(\n state.url.trim_right(\"/\") + state.path + \"?\" + {\n \"type\": [work_list[0].type],\n ?\"nextPage\": work_list[0].?next.optMap(next, [next]),\n ?\"dateRangeStartsAt\": state.?start.optMap(start, [start.format(\"2006-01-02\")]),\n ?\"dateRangeEndsAt\": state.?end.optMap(end, [end.format(\"2006-01-02\")]),\n ?\"pageSize\": state.?page_size.optMap(size, [string(int(size))]),\n }.format_query()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body,\n {\n \"events\": body.value.map(b, has(b.url),\n get(b.url).as(batch, batch.StatusCode == 200 ?\n bytes(batch.Body).mime(\"application/gzip\").mime(\"application/x-ndjson\").map(e,\n {\n \"message\": dyn(e.encode_json()),\n }\n )\n :\n [{\n \"error\": {\n \"code\": string(batch.StatusCode),\n \"id\": string(batch.Status),\n \"message\": \"GET \" + b.url + \": \" + (\n size(batch.Body) != 0 ?\n string(batch.Body)\n :\n string(batch.Status) + ' (' + string(batch.StatusCode) + ')'\n ),\n },\n }]\n )\n ).flatten(),\n \"cursor\": {\n \"work_list\": (\n \"@nextPage\" in body && size(body.value) != 0 ?\n [work_list[0].with({\"next\": body[\"@nextPage\"]})]\n :\n []\n ) + tail(work_list),\n },\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": \"@nextPage\" in body && size(body.value) != 0,\n }.as(to_publish, to_publish.with({\n \"want_more\": to_publish.want_more || size(to_publish.cursor.work_list) != 0,\n }))\n ).as(state, \n // Check whether we still need to get more, but have\n // no event for this type. If we do, populate events\n // with a place-holder to be discarded by the ingest\n // pipeline.\n state.want_more && size(state.events) == 0 ?\n state.with({\"events\": [{\"message\": \"want_more\"}]})\n :\n state\n )\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"GET \" + state.path + \": \" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + end_field: dateRangeEndsAt + look_back: 24h + page_size: null + path: /siem/v1/batch/events/cg + start_field: dateRangeStartsAt + types: + - av + - delivery + - internal email protect + - impersonation protect + - journal + - process + - receipt + - attachment protect + - spam + - url protect + tags: + - forwarded + - mimecast-siem-logs + - condition: ${kubernetes.hints.mimecast.threat_intel_malware_customer.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.threat_intel_malware_customer + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "// This program is shared between threat_intel_malware_customer and\n// threat_intel_malware_grid. If it is changed here changes should be\n// reflected in the other data streams. 
Do not differentiate the logic\n// between these data streams lightly; use the state variable for this\n// unless absolutely required.\nstate.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token: \"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? token :\n {\n \"data\": state.?last_page.data.orValue([{\n ?\"start\": has(state.?cursor.token) ? optional.none() :\n optional.of(state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339))),\n ?\"end\": has(state.?cursor.token) ? optional.none() :\n optional.of(now.format(time_layout.RFC3339)),\n \"feedType\": state.feed_type,\n ?\"token\": state.?cursor.token,\n \"fileType\": \"stix\",\n }]),\n }.as(req,\n post_request(state.url.trim_right(\"/\") + state.path, \"application/json\", \n req.encode_json()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ?\n {\n \"events\": body.objects.map(e, e.type == \"indicator\", {\"message\": e.encode_json()}),\n \"cursor\": {\n // The last timestamp may step past the last timestamp\n // seen for an indicator. We assume here that if another\n // type has a later timestamp, then the time at the API\n // has progressed past the last indicator and we do not\n // need to reach back that far.\n \"last\": ([now] + body.objects.map(e, timestamp(e.modified))).max().format(time_layout.RFC3339),\n ?\"token\": resp.?Header[\"X-Mc-Threat-Feed-Next-Token\"][?0],\n },\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": resp.?Header[\"X-Mc-Threat-Feed-Next-Token\"].hasValue(),\n }\n :\n // Mimecast can return failure states with a 200. This\n // is detected by a non-empty fail array at the root\n // of the response body. Don't attempt to parse this\n // out, just dump the whole body into the error message.\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \": \" + string(resp.Body), // We know this is not empty.\n },\n },\n \"want_more\": false,\n }\n )\n : resp.StatusCode == 429 ?\n // For reasons, Mimecast does not set X-RateLimit-* headers\n // until the rate limit has been exceeded, so treat 429 codes\n // as a sentinel to back off. 
We don't want to log errors and\n // we do not want to update the cursor, so return an empty\n // events array.\n {\n \"events\": [],\n // Log the rate limit exceedance at DEBUG level.\n \"rate_limited\": debug(\"rate_limit_exceeded\", bytes(resp.Body).decode_json().?fail[0].message.orValue(\"missing message\")),\n \"want_more\": false,\n }\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \": \" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + feed_type: malware_customer + look_back: 24h + page_size: null + path: /api/ttp/threat-intel/get-feed + tags: + - forwarded + - mimecast-threat-intel-feed-malware-customer + - condition: ${kubernetes.hints.mimecast.threat_intel_malware_grid.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.threat_intel_malware_grid + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: "// This program is shared between threat_intel_malware_customer and\n// threat_intel_malware_grid. If it is changed here changes should be\n// reflected in the other data streams. Do not differentiate the logic\n// between these data streams lightly; use the state variable for this\n// unless absolutely required.\nstate.with(\n (\n (has(state.?token.expires) && now() < timestamp(state.token.expires)) ?\n // The token we have is still valid.\n state.token\n :\n // Get a new token.\n post_request(state.url.trim_right(\"/\") + \"/oauth/token\", \"application/x-www-form-urlencoded\",\n {\n \"client_id\": [state.client_id],\n \"client_secret\": [state.client_secret],\n \"grant_type\": [\"client_credentials\"],\n }.format_query()\n ).do_request().as(auth, auth.StatusCode == 200 ?\n bytes(auth.Body).decode_json().as(auth_body, auth_body.with({\n // Include 60s grace period to avoid attempting to make\n // a request with a stale authentication token.\n \"expires\": now()+duration(string(int(auth_body.expires_in)-60)+\"s\"),\n }))\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(auth.StatusCode),\n \"id\": string(auth.Status),\n \"message\": \"POST /oauth/token: \"+(\n size(auth.Body) != 0 ?\n string(auth.Body)\n :\n string(auth.Status) + ' (' + string(auth.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n ).as(token, !has(token.access_token) ? token :\n {\n \"data\": state.?last_page.data.orValue([{\n ?\"start\": has(state.?cursor.token) ? optional.none() :\n optional.of(state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339))),\n ?\"end\": has(state.?cursor.token) ?
optional.none() :\n optional.of(now.format(time_layout.RFC3339)),\n \"feedType\": state.feed_type,\n ?\"token\": state.?cursor.token,\n \"fileType\": \"stix\",\n }]),\n }.as(req,\n post_request(state.url.trim_right(\"/\") + state.path, \"application/json\", \n req.encode_json()\n ).with({\n \"Header\": {\n \"Authorization\": [\"Bearer \" + token.access_token], \n \"Accept\": [\"application/json\"],\n \"Content-Type\": [\"application/json\"],\n }\n }).do_request().as(resp, resp.StatusCode == 200 ?\n bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ?\n {\n \"events\": body.objects.map(e, e.type == \"indicator\", {\"message\": e.encode_json()}),\n \"cursor\": {\n // The last timestamp may step past the last timestamp\n // seen for an indicator. We assume here that if another\n // type has a later timestamp, then the time at the API\n // has progressed past the last indicator and we do not\n // need to reach back that far.\n \"last\": ([now] + body.objects.map(e, timestamp(e.modified))).max().format(time_layout.RFC3339),\n ?\"token\": resp.?Header[\"X-Mc-Threat-Feed-Next-Token\"][?0],\n },\n \"token\": {\n \"access_token\": token.access_token,\n \"expires\": token.expires,\n },\n \"want_more\": resp.?Header[\"X-Mc-Threat-Feed-Next-Token\"].hasValue(),\n }\n :\n // Mimecast can return failure states with a 200. This\n // is detected by a non-empty fail array at the root\n // of the response body. Don't attempt to parse this\n // out, just dump the whole body into the error message.\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \": \" + string(resp.Body), // We know this is not empty.\n },\n },\n \"want_more\": false,\n }\n )\n : resp.StatusCode == 429 ?\n // For reasons, Mimecast does not set X-RateLimit-* headers\n // until the rate limit has been exceeded, so treat 429 codes\n // as a sentinel to back off. We don't want to log errors and\n // we do not want to update the cursor, so return an empty\n // events array.\n {\n \"events\": [],\n // Log the rate limit exceedance at DEBUG level.\n \"rate_limited\": debug(\"rate_limit_exceeded\", bytes(resp.Body).decode_json().?fail[0].message.orValue(\"missing message\")),\n \"want_more\": false,\n }\n :\n {\n \"events\": {\n \"error\": {\n \"code\": string(resp.StatusCode),\n \"id\": string(resp.Status),\n \"message\": \"POST \" + state.path + \": \" + (\n size(resp.Body) != 0 ?\n string(resp.Body)\n :\n string(resp.Status) + ' (' + string(resp.StatusCode) + ')'\n ),\n },\n },\n \"want_more\": false,\n }\n )\n )\n )\n)\n" + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + feed_type: malware_grid + look_back: 24h + page_size: null + path: /api/ttp/threat-intel/get-feed + tags: + - forwarded + - mimecast-threat-intel-feed-malware-grid + - condition: ${kubernetes.hints.mimecast.ttp_ap_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.ttp_ap_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. 
Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? 
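+                // Include the response body when it is non-empty; otherwise
+                // fall back to the status text and code.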
+ string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: attachmentLogs + end_field: to + look_back: 24h + page_size: 100 + path: /api/ttp/attachment/get-logs + start_field: from + time_field: date + tags: + - forwarded + - mimecast-ttp-ap + - condition: ${kubernetes.hints.mimecast.ttp_ip_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.ttp_ip_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? 
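+                // Keep the original request window together with the
+                // server-issued pageToken so the next evaluation resumes
+                // this page run where it stopped.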
+ optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? + string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: impersonationLogs + end_field: to + look_back: 24h + page_size: 100 + path: /api/ttp/impersonation/get-logs + start_field: from + time_field: eventTime + tags: + - forwarded + - mimecast-ttp-ip + - condition: ${kubernetes.hints.mimecast.ttp_url_logs.enabled} == true and ${kubernetes.hints.mimecast.enabled} == true + config_version: 2 + data_stream: + dataset: mimecast.ttp_url_logs + type: logs + fields_under_root: true + interval: 5m + keep_null: true + program: | + // This program is shared amongst archive_search_logs, dlp_logs, + // message_release_logs, ttp_ap_logs, ttp_ip_logs, and ttp_url_logs. + // If it is changed here changes should be reflected in the other + // data streams. Do not differentiate the logic between these data + // streams lightly; use the state variable for this unless absolutely + // required. + state.with( + ( + (has(state.?token.expires) && now() < timestamp(state.token.expires)) ? + // The token we have is still valid. + state.token + : + // Get a new token. + post_request(state.url.trim_right("/") + "/oauth/token", "application/x-www-form-urlencoded", + { + "client_id": [state.client_id], + "client_secret": [state.client_secret], + "grant_type": ["client_credentials"], + }.format_query() + ).do_request().as(auth, auth.StatusCode == 200 ? + bytes(auth.Body).decode_json().as(auth_body, auth_body.with({ + // Include 60s grace period to avoid attempting to make + // a request with a stale authentication token. + "expires": now()+duration(string(int(auth_body.expires_in)-60)+"s"), + })) + : + { + "events": { + "error": { + "code": string(auth.StatusCode), + "id": string(auth.Status), + "message": "POST /oauth/token: "+( + size(auth.Body) != 0 ? + string(auth.Body) + : + string(auth.Status) + ' (' + string(auth.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ).as(token, !has(token.access_token) ? 
token : + { + "data": state.?last_page.data.orValue([{ + state.start_field: state.?cursor.last.orValue((now - duration(state.look_back)).format(time_layout.RFC3339)), + state.end_field: now.format(time_layout.RFC3339), + }]), + }.as(req, + post_request(state.url.trim_right("/") + state.path, "application/json", + { + "meta": { + "pagination": { + "pageSize": state.page_size, + ?"pageToken": state.?last_page.next, + } + }, + "data": req.data, + }.encode_json() + ).with({ + "Header": { + "Authorization": ["Bearer " + token.access_token], + "Accept": ["application/json"], + "Content-Type": ["application/json"], + } + }).do_request().as(resp, resp.StatusCode == 200 ? + bytes(resp.Body).decode_json().as(body, body.?fail.orValue([]).size() == 0 ? + { + "events": body.data.map(e, e[state.data_path].map(l, {"message": l.encode_json()})).flatten(), + "cursor": { + "last": ([now] + body.data.map(e, + e[state.data_path].map(l, + l[state.time_field].parse_time(["2006-01-02T15:04:05-0700", time_layout.RFC3339]) + ) + ).flatten()).max().format(time_layout.RFC3339), + }, + ?"last_page": has(body.?meta.pagination.next) && size(body.data) != 0 ? + optional.of({ + ?"next": body.?meta.pagination.next, + "data": req.data, + }) + : + optional.none(), + "token": { + "access_token": token.access_token, + "expires": token.expires, + }, + "want_more": has(body.?meta.pagination.next) && size(body.data) != 0, + } + : + // Mimecast can return failure states with a 200. This + // is detected by a non-empty fail array at the root + // of the response body. Don't attempt to parse this + // out, just dump the whole body into the error message. + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ":" + string(resp.Body), // We know this is not empty. + }, + }, + "want_more": false, + } + ) + : + { + "events": { + "error": { + "code": string(resp.StatusCode), + "id": string(resp.Status), + "message": "POST " + state.path + ": " + ( + size(resp.Body) != 0 ? 
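+                // Include the response body when it is non-empty; otherwise
+                // fall back to the status text and code.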
+ string(resp.Body) + : + string(resp.Status) + ' (' + string(resp.StatusCode) + ')' + ), + }, + }, + "want_more": false, + } + ) + ) + ) + ) + redact: + fields: + - client_id + - client_secret + - token.access_token + resource.url: https://api.services.mimecast.com + state: + client_id: null + client_secret: null + data_path: clickLogs + end_field: to + look_back: 24h + page_size: 100 + path: /api/ttp/url/get-logs + start_field: from + time_field: date + tags: + - forwarded + - mimecast-ttp-url + data_stream.namespace: default + - name: filestream-mimecast + id: filestream-mimecast-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mimecast.container_logs.enabled} == true + data_stream: + dataset: mimecast.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default - name: httpjson-mimecast id: httpjson-mimecast-${kubernetes.hints.container_id} type: httpjson use_output: default streams: + - condition: ${kubernetes.hints.mimecast.archive_search_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.createTime]]' + data_stream: + dataset: mimecast.archive_search_logs + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + target: body.meta.pagination.pageSize + value: 100 + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/archive/get-archive-search-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/archive/get-archive-search-logs + response.decode_as: application/json + response.pagination: + - delete: + target: body.data + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: |- + [[- if index .last_response.body.meta.pagination "next" -]] + [[- .last_response.body.meta.pagination.next -]] + [[- end -]] + response.split: + ignore_empty_value: true + split: + ignore_empty_value: true + keep_parent: false + target: body.logs + target: body.data + tags: + - forwarded + - mimecast-archive-search-logs - condition: ${kubernetes.hints.mimecast.audit_events.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true config_version: "2" cursor: @@ -135,6 +1274,7 @@ inputs: request.url: https://eu-api.mimecast.com/api/audit/get-siem-logs response.decode_as: application/zip response.split: + ignore_empty_value: true target: body.data transforms: - set: @@ -364,25 +1504,3 @@ inputs: - forwarded - mimecast-ttp-url data_stream.namespace: default - - name: filestream-mimecast - id: filestream-mimecast-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.mimecast.container_logs.enabled} == true - data_stream: - dataset: mimecast.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - 
data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml index bfdfd059ebe..cc55ebbcf73 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml @@ -14,6 +14,8 @@ inputs: _conf: tz_offset: local fields_under_root: true + file_identity: + fingerprint: null parsers: - container: format: auto @@ -24,6 +26,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - modsec-audit diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml index 6af480629d2..b0bd0b07245 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - mongodb-logs @@ -38,7 +42,7 @@ inputs: password: ${kubernetes.hints.mongodb.collstats.password|kubernetes.hints.mongodb.password|''} period: ${kubernetes.hints.mongodb.collstats.period|kubernetes.hints.mongodb.period|'10s'} ssl.certificate: null - ssl.enabled: null + ssl.enabled: false ssl.key: null ssl.verification_mode: null username: ${kubernetes.hints.mongodb.collstats.username|kubernetes.hints.mongodb.username|''} @@ -68,7 +72,7 @@ inputs: password: ${kubernetes.hints.mongodb.metrics.password|kubernetes.hints.mongodb.password|''} period: ${kubernetes.hints.mongodb.metrics.period|kubernetes.hints.mongodb.period|'10s'} ssl.certificate: null - ssl.enabled: null + ssl.enabled: false ssl.key: null ssl.verification_mode: null username: ${kubernetes.hints.mongodb.metrics.username|kubernetes.hints.mongodb.username|''} diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml index df50544f5d9..b21edd74269 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml @@ -1,43 +1,4 @@ inputs: - - name: mysql/metrics-mysql - id: mysql/metrics-mysql-${kubernetes.hints.container_id} - type: mysql/metrics - use_output: default - streams: - - condition: ${kubernetes.hints.mysql.galera_status.enabled} == true and ${kubernetes.hints.mysql.enabled} == true - data_stream: - dataset: mysql.galera_status - type: metrics - hosts: - - ${kubernetes.hints.mysql.galera_status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} - metricsets: - - galera_status - password: ${kubernetes.hints.mysql.galera_status.password|kubernetes.hints.mysql.password|'test'} - period: ${kubernetes.hints.mysql.galera_status.period|kubernetes.hints.mysql.period|'10s'} - username: ${kubernetes.hints.mysql.galera_status.username|kubernetes.hints.mysql.username|'root'} - - condition: ${kubernetes.hints.mysql.performance.enabled} == true or ${kubernetes.hints.mysql.enabled} == true - data_stream: - dataset: mysql.performance - type: metrics - hosts: - - ${kubernetes.hints.mysql.performance.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} - metricsets: - - performance - password: 
${kubernetes.hints.mysql.performance.password|kubernetes.hints.mysql.password|'test'} - period: ${kubernetes.hints.mysql.performance.period|kubernetes.hints.mysql.period|'10s'} - username: ${kubernetes.hints.mysql.performance.username|kubernetes.hints.mysql.username|'root'} - - condition: ${kubernetes.hints.mysql.status.enabled} == true or ${kubernetes.hints.mysql.enabled} == true - data_stream: - dataset: mysql.status - type: metrics - hosts: - - ${kubernetes.hints.mysql.status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} - metricsets: - - status - password: ${kubernetes.hints.mysql.status.password|kubernetes.hints.mysql.password|'test'} - period: ${kubernetes.hints.mysql.status.period|kubernetes.hints.mysql.period|'10s'} - username: ${kubernetes.hints.mysql.status.username|kubernetes.hints.mysql.username|'root'} - data_stream.namespace: default - name: filestream-mysql id: filestream-mysql-${kubernetes.hints.container_id} type: filestream @@ -49,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -63,6 +26,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true - condition: ${kubernetes.hints.mysql.slowlog.enabled} == true or ${kubernetes.hints.mysql.enabled} == true data_stream: @@ -73,6 +38,8 @@ inputs: exclude_lines: - '^[\/\w\.]+, Version: .* started with:.*' - ^# Time:.* + file_identity: + fingerprint: null multiline: match: after negate: true @@ -85,5 +52,46 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true data_stream.namespace: default + - name: mysql/metrics-mysql + id: mysql/metrics-mysql-${kubernetes.hints.container_id} + type: mysql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.mysql.galera_status.enabled} == true and ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.galera_status + type: metrics + hosts: + - ${kubernetes.hints.mysql.galera_status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - galera_status + password: ${kubernetes.hints.mysql.galera_status.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.galera_status.period|kubernetes.hints.mysql.period|'10s'} + username: ${kubernetes.hints.mysql.galera_status.username|kubernetes.hints.mysql.username|'root'} + - condition: ${kubernetes.hints.mysql.performance.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.performance + type: metrics + hosts: + - ${kubernetes.hints.mysql.performance.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - performance + password: ${kubernetes.hints.mysql.performance.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.performance.period|kubernetes.hints.mysql.period|'10s'} + username: ${kubernetes.hints.mysql.performance.username|kubernetes.hints.mysql.username|'root'} + - condition: ${kubernetes.hints.mysql.status.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.status + type: metrics + hosts: + - ${kubernetes.hints.mysql.status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - status + password: ${kubernetes.hints.mysql.status.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.status.period|kubernetes.hints.mysql.period|'10s'} + username: 
${kubernetes.hints.mysql.status.username|kubernetes.hints.mysql.username|'root'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml index 4630a5b5e9e..c75da289568 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml @@ -1,4 +1,31 @@ inputs: + - name: filestream-nats + id: filestream-nats-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nats.log.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.log + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nats.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - nats-log + data_stream.namespace: default - name: nats/metrics-nats id: nats/metrics-nats-${kubernetes.hints.container_id} type: nats/metrics @@ -59,26 +86,3 @@ inputs: - subscriptions period: ${kubernetes.hints.nats.subscriptions.period|kubernetes.hints.nats.period|'10s'} data_stream.namespace: default - - name: filestream-nats - id: filestream-nats-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.nats.log.enabled} == true or ${kubernetes.hints.nats.enabled} == true - data_stream: - dataset: nats.log - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.nats.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - nats-log - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml index 4e5879469a4..930171a10de 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml @@ -1,4 +1,62 @@ inputs: + - name: filestream-nginx + id: filestream-nginx-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.access.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.access + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + ignore_older: 72h + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - nginx-access + - condition: ${kubernetes.hints.nginx.error.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.error + type: logs + exclude_files: + - .gz$ + file_identity: + fingerprint: null + ignore_older: 72h + multiline: + match: after + negate: true + pattern: '^\d{4}\/\d{2}\/\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: 
true + tags: + - nginx-error + data_stream.namespace: default - name: httpjson-nginx id: httpjson-nginx-${kubernetes.hints.container_id} type: httpjson @@ -97,53 +155,3 @@ inputs: period: ${kubernetes.hints.nginx.stubstatus.period|kubernetes.hints.nginx.period|'10s'} server_status_path: /nginx_status data_stream.namespace: default - - name: filestream-nginx - id: filestream-nginx-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.nginx.access.enabled} == true or ${kubernetes.hints.nginx.enabled} == true - data_stream: - dataset: nginx.access - type: logs - exclude_files: - - .gz$ - ignore_older: 72h - parsers: - - container: - format: auto - stream: ${kubernetes.hints.nginx.access.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - nginx-access - - condition: ${kubernetes.hints.nginx.error.enabled} == true or ${kubernetes.hints.nginx.enabled} == true - data_stream: - dataset: nginx.error - type: logs - exclude_files: - - .gz$ - ignore_older: 72h - multiline: - match: after - negate: true - pattern: '^\d{4}\/\d{2}\/\d{2} ' - parsers: - - container: - format: auto - stream: ${kubernetes.hints.nginx.error.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - nginx-error - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml index 29e0c8f1699..e5dac21fdf8 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml @@ -1,4 +1,31 @@ inputs: + - name: filestream-oracle + id: filestream-oracle-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.oracle.database_audit.enabled} == true or ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.database_audit + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^Audit file + parsers: + - multiline: + match: after + negate: true + pattern: ^[A-Za-z]{3}\s+[A-Za-z]{3}\s+[0-9]{1,2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s[0-9]{4}\s\S[0-9]{2}:[0-9]{2} + timeout: 10 + type: pattern + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + tags: + - oracle-database_audit + data_stream.namespace: default - name: sql/metrics-oracle id: sql/metrics-oracle-${kubernetes.hints.container_id} type: sql/metrics @@ -92,30 +119,3 @@ inputs: - query: WITH data_files AS (SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status FROM sys.dba_data_files UNION SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, status AS ONLINE_STATUS FROM sys.dba_temp_files), spaces AS (SELECT b.tablespace_name TB_NAME, tbs_size TB_SIZE_USED, a.free_space TB_SIZE_FREE FROM (SELECT tablespace_name, SUM(bytes) AS free_space FROM dba_free_space GROUP BY tablespace_name) a, (SELECT tablespace_name, SUM(bytes) AS tbs_size FROM dba_data_files GROUP BY tablespace_name) b WHERE a.tablespace_name(+) = b.tablespace_name AND a.tablespace_name != 'TEMP'), temp_spaces AS (SELECT tablespace_name, tablespace_size, allocated_space, free_space FROM dba_temp_free_space WHERE tablespace_name = 
'TEMP'), details AS (SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, sp.tb_size_used, sp.tb_size_free FROM data_files df, spaces sp WHERE df.tablespace_name = sp.tb_name UNION SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, tsp.tablespace_size - tsp.free_space AS TB_SIZE_USED, tsp.free_space AS TB_SIZE_FREE FROM data_files df, temp_spaces tsp WHERE df.tablespace_name = tsp.tablespace_name) SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status, tb_size_used, tb_size_free, SUM(bytes) over() AS TOTAL_BYTES FROM details response_format: table data_stream.namespace: default - - name: filestream-oracle - id: filestream-oracle-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.oracle.database_audit.enabled} == true or ${kubernetes.hints.oracle.enabled} == true - data_stream: - dataset: oracle.database_audit - type: logs - exclude_files: - - .gz$ - exclude_lines: - - ^Audit file - parsers: - - multiline: - match: after - negate: true - pattern: ^[A-Za-z]{3}\s+[A-Za-z]{3}\s+[0-9]{1,2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s[0-9]{4}\s\S[0-9]{2}:[0-9]{2} - timeout: 10 - type: pattern - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - tags: - - oracle-database_audit - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml index 7aeb20e1ccc..76bf6396568 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml @@ -13,6 +13,10 @@ inputs: max_message_size: 50KiB processors: - add_locale: null + - copy_fields: + fields: + - from: '@timestamp' + to: event.created - syslog: field: message format: auto @@ -21,6 +25,7 @@ inputs: fields: internal_zones: - trust + tz_offset: Local target: _conf - add_fields: fields: @@ -44,6 +49,10 @@ inputs: max_message_size: 50KiB processors: - add_locale: null + - copy_fields: + fields: + - from: '@timestamp' + to: event.created - syslog: field: message format: auto @@ -52,6 +61,7 @@ inputs: fields: internal_zones: - trust + tz_offset: Local target: _conf - add_fields: fields: @@ -72,7 +82,7 @@ inputs: dataset: panw.panos type: logs exclude_files: - - .gz$ + - \.gz$ fields: _conf: external_zones: @@ -81,6 +91,8 @@ inputs: - trust tz_offset: local fields_under_root: true + file_identity: + fingerprint: null parsers: - container: format: auto @@ -89,8 +101,14 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log processors: - add_locale: null + - copy_fields: + fields: + - from: '@timestamp' + to: event.created prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - panw-panos diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml index 18c9cf3ed4b..8099386204d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -22,6 +24,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: 
scanner: + fingerprint: + enabled: true symlinks: true tags: - postgresql-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml index e5b613a4804..43c1ade916a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml @@ -59,9 +59,9 @@ inputs: - remote_write port: 9201 rate_counters: true - ssl.certificate: /etc/pki/server/cert.pem + ssl.certificate: null ssl.enabled: null - ssl.key: null + ssl.key: /etc/pki/server/cert.key types_patterns.exclude: null types_patterns.include: null use_types: true diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml index 698bb87070c..075f51fdb1f 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null multiline: match: after negate: true @@ -24,6 +26,8 @@ inputs: - add_locale: null prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml index eefb7e7a008..a1321a6880c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml @@ -1,29 +1,4 @@ inputs: - - name: filestream-redis - id: filestream-redis-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.redis.log.enabled} == true or ${kubernetes.hints.redis.enabled} == true - data_stream: - dataset: redis.log - type: logs - exclude_files: - - .gz$ - exclude_lines: - - ^\s+[\-`('.|_] - parsers: - - container: - format: auto - stream: ${kubernetes.hints.redis.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: - - redis-log - data_stream.namespace: default - name: redis-redis id: redis-redis-${kubernetes.hints.container_id} type: redis @@ -85,3 +60,32 @@ inputs: password: ${kubernetes.hints.redis.keyspace.password|kubernetes.hints.redis.password|''} period: ${kubernetes.hints.redis.keyspace.period|kubernetes.hints.redis.period|'10s'} data_stream.namespace: default + - name: filestream-redis + id: filestream-redis-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.redis.log.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.log + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^\s+[\-`('.|_] + file_identity: + fingerprint: null + parsers: + - container: + format: auto + stream: ${kubernetes.hints.redis.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true + tags: + - redis-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml index 3797fadc554..ca4282d7036 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - santa-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml index 8557717a5db..7e7c6e3de88 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml @@ -1,4 +1,26 @@ inputs: + - name: filestream-sentinel_one + id: filestream-sentinel_one-${kubernetes.hints.container_id} + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true + data_stream: + dataset: sentinel_one.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default - name: httpjson-sentinel_one id: httpjson-sentinel_one-${kubernetes.hints.container_id} type: httpjson @@ -195,25 +217,3 @@ inputs: - forwarded - sentinel_one-threat data_stream.namespace: default - - name: filestream-sentinel_one - id: filestream-sentinel_one-${kubernetes.hints.container_id} - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true - data_stream: - dataset: sentinel_one.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml index 36254df2c53..f08e0fb0c69 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -26,6 +28,8 @@ inputs: target: _tmp prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml index 9d94ff431d4..39de591c936 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml index cbb037a11d1..a25c2eb659c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml index 17f23d019e6..83e8671e3b3 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml @@ -15,6 +15,8 @@ inputs: remove_mapped_fields: false tz_offset: UTC fields_under_root: true + file_identity: + fingerprint: null parsers: - container: format: auto @@ -23,6 +25,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - symantec-endpoint-log diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml index 96a643f41ea..5127a4ba11d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml @@ -1,4 +1,32 @@ inputs: + - name: synthetics/http-synthetics + id: synthetics/http-synthetics-${kubernetes.hints.container_id} + type: synthetics/http + use_output: default + streams: + - __ui: null + check.request.method: null + condition: ${kubernetes.hints.synthetics.http.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: http + type: synthetics + enabled: true + ipv4: true + ipv6: true + max_attempts: 2 + max_redirects: null + name: null + password: ${kubernetes.hints.synthetics.http.password|kubernetes.hints.synthetics.password|''} + response.include_body: null + response.include_headers: null + run_from.geo.name: Fleet managed + run_from.id: fleet_managed + schedule: '@every 3m' + timeout: ${kubernetes.hints.synthetics.http.timeout|kubernetes.hints.synthetics.timeout|''} + type: http + urls: null + username: ${kubernetes.hints.synthetics.http.username|kubernetes.hints.synthetics.username|''} + data_stream.namespace: default - name: synthetics/tcp-synthetics id: synthetics/tcp-synthetics-${kubernetes.hints.container_id} type: synthetics/tcp @@ -13,12 +41,8 @@ inputs: hosts: ${kubernetes.hints.synthetics.tcp.host|kubernetes.hints.synthetics.host|''} ipv4: true ipv6: true + max_attempts: 2 name: null - processors: - - add_fields: - fields: - monitor.fleet_managed: true - target: "" proxy_use_local_resolver: false run_from.geo.name: Fleet managed run_from.id: fleet_managed @@ -40,12 +64,8 @@ inputs: hosts: ${kubernetes.hints.synthetics.icmp.host|kubernetes.hints.synthetics.host|''} ipv4: true ipv6: true + max_attempts: 2 name: null - processors: - - add_fields: - fields: - monitor.fleet_managed: true - target: "" run_from.geo.name: Fleet managed run_from.id: fleet_managed schedule: '@every 3m' @@ -64,12 +84,8 @@ inputs: dataset: browser type: synthetics enabled: true + max_attempts: 2 name: null - processors: - - add_fields: - fields: - monitor.fleet_managed: true - target: "" run_from.geo.name: Fleet managed run_from.id: fleet_managed schedule: '@every 3m' @@ -117,35 +133,3 @@ inputs: symlinks: true tags: [] data_stream.namespace: 
default - - name: synthetics/http-synthetics - id: synthetics/http-synthetics-${kubernetes.hints.container_id} - type: synthetics/http - use_output: default - streams: - - __ui: null - check.request.method: null - condition: ${kubernetes.hints.synthetics.http.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true - data_stream: - dataset: http - type: synthetics - enabled: true - ipv4: true - ipv6: true - max_redirects: null - name: null - password: ${kubernetes.hints.synthetics.http.password|kubernetes.hints.synthetics.password|''} - processors: - - add_fields: - fields: - monitor.fleet_managed: true - target: "" - response.include_body: null - response.include_headers: null - run_from.geo.name: Fleet managed - run_from.id: fleet_managed - schedule: '@every 3m' - timeout: ${kubernetes.hints.synthetics.http.timeout|kubernetes.hints.synthetics.timeout|''} - type: http - urls: null - username: ${kubernetes.hints.synthetics.http.username|kubernetes.hints.synthetics.username|''} - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml index e88d1490bc4..4b9422213e0 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml @@ -1,27 +1,20 @@ inputs: - - name: filestream-tomcat - id: filestream-tomcat-${kubernetes.hints.container_id} - type: filestream + - name: udp-tomcat + id: udp-tomcat-${kubernetes.hints.container_id} + type: udp use_output: default streams: - - condition: ${kubernetes.hints.tomcat.log.enabled} == true and ${kubernetes.hints.tomcat.enabled} == true + - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true data_stream: dataset: tomcat.log type: logs - exclude_files: - - .gz$ fields: observer: product: TomCat type: Web vendor: Apache fields_under_root: true - parsers: - - container: - format: auto - stream: ${kubernetes.hints.tomcat.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log + host: localhost:9523 processors: - script: lang: javascript @@ -2764,16 +2757,14 @@ inputs: target_field: url.registered_domain target_subdomain_field: url.subdomain - add_locale: null - prospector: - scanner: - symlinks: true tags: - tomcat-log - forwarded + udp: null data_stream.namespace: default - - name: udp-tomcat - id: udp-tomcat-${kubernetes.hints.container_id} - type: udp + - name: tcp-tomcat + id: tcp-tomcat-${kubernetes.hints.container_id} + type: tcp use_output: default streams: - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true @@ -5532,24 +5523,33 @@ inputs: tags: - tomcat-log - forwarded - udp: null + tcp: null data_stream.namespace: default - - name: tcp-tomcat - id: tcp-tomcat-${kubernetes.hints.container_id} - type: tcp + - name: filestream-tomcat + id: filestream-tomcat-${kubernetes.hints.container_id} + type: filestream use_output: default streams: - - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true + - condition: ${kubernetes.hints.tomcat.log.enabled} == true and ${kubernetes.hints.tomcat.enabled} == true data_stream: dataset: tomcat.log type: logs + exclude_files: + - .gz$ fields: observer: product: TomCat type: Web vendor: Apache fields_under_root: true - host: localhost:9523 + file_identity: + fingerprint: null + parsers: + - container: + format: auto + 
stream: ${kubernetes.hints.tomcat.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log processors: - script: lang: javascript @@ -8292,8 +8292,12 @@ inputs: target_field: url.registered_domain target_subdomain_field: url.subdomain - add_locale: null + prospector: + scanner: + fingerprint: + enabled: true + symlinks: true tags: - tomcat-log - forwarded - tcp: null data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml index dbd3b642d42..b485c4cbed5 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -18,6 +20,8 @@ inputs: - /var/log/containers/*${kubernetes.hints.container_id}.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -31,8 +35,7 @@ inputs: data_stream: dataset: traefik.health type: metrics - hosts: - - ${kubernetes.hints.traefik.health.host|kubernetes.hints.traefik.host|'localhost:8080'} + hosts: null metricsets: - health period: ${kubernetes.hints.traefik.health.period|kubernetes.hints.traefik.period|'10s'} diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml index bc21b484f27..1204c4e7e9c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml @@ -1,17 +1,4 @@ inputs: - - name: udp-udp - id: udp-udp-${kubernetes.hints.container_id} - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true - data_stream: - dataset: udp.generic - type: logs - host: localhost:8080 - max_message_size: 10KiB - timeout: ${kubernetes.hints.udp.generic.timeout|kubernetes.hints.udp.timeout|''} - data_stream.namespace: default - name: filestream-udp id: filestream-udp-${kubernetes.hints.container_id} type: filestream @@ -34,3 +21,16 @@ inputs: symlinks: true tags: [] data_stream.namespace: default + - name: udp-udp + id: udp-udp-${kubernetes.hints.container_id} + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true + data_stream: + dataset: udp.generic + type: logs + host: localhost:8080 + max_message_size: 10KiB + timeout: ${kubernetes.hints.udp.generic.timeout|kubernetes.hints.udp.timeout|''} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml index 871e2ade541..eac5df1c517 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml @@ -10,6 +10,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -20,6 +22,8 @@ inputs: - /usr/local/var/spool/zeek/capture_loss.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -30,6 +34,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -40,6 +46,8 @@ inputs: - /usr/local/var/spool/zeek/conn.log 
prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -50,6 +58,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -60,6 +70,8 @@ inputs: - /usr/local/var/spool/zeek/dce_rpc.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -70,6 +82,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -80,6 +94,8 @@ inputs: - /usr/local/var/spool/zeek/dhcp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -90,6 +106,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -100,6 +118,8 @@ inputs: - /usr/local/var/spool/zeek/dnp3.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - zeek-dnp3 @@ -109,6 +129,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -119,6 +141,8 @@ inputs: - /usr/local/var/spool/zeek/dns.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -129,6 +153,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -139,6 +165,8 @@ inputs: - /usr/local/var/spool/zeek/dpd.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - zeek-dpd @@ -148,6 +176,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -158,6 +188,8 @@ inputs: - /usr/local/var/spool/zeek/files.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - zeek-files @@ -167,6 +199,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -177,6 +211,8 @@ inputs: - /usr/local/var/spool/zeek/ftp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -187,6 +223,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -197,6 +235,8 @@ inputs: - /usr/local/var/spool/zeek/http.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -207,6 +247,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -217,6 +259,8 @@ inputs: - /usr/local/var/spool/zeek/intel.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -227,6 +271,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -237,6 +283,8 @@ inputs: - /usr/local/var/spool/zeek/irc.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -247,6 +295,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -257,6 +307,8 @@ inputs: - /usr/local/var/spool/zeek/kerberos.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -267,6 +319,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -277,6 +331,8 @@ inputs: - /usr/local/var/spool/zeek/known_certs.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -287,6 +343,8 @@ inputs: type: logs exclude_files: - .gz$ + 
file_identity: + fingerprint: null parsers: - container: format: auto @@ -297,6 +355,8 @@ inputs: - /usr/local/var/spool/zeek/known_hosts.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -307,6 +367,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -317,6 +379,8 @@ inputs: - /usr/local/var/spool/zeek/known_services.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -327,6 +391,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -337,6 +403,8 @@ inputs: - /usr/local/var/spool/zeek/modbus.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -347,6 +415,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -357,6 +427,8 @@ inputs: - /usr/local/var/spool/zeek/mysql.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -367,6 +439,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -377,6 +451,8 @@ inputs: - /usr/local/var/spool/zeek/notice.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -387,6 +463,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -397,6 +475,8 @@ inputs: - /usr/local/var/spool/zeek/ntlm.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -407,6 +487,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -417,6 +499,8 @@ inputs: - /usr/local/var/spool/zeek/ntp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -427,6 +511,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -437,6 +523,8 @@ inputs: - /usr/local/var/spool/zeek/ocsp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -447,6 +535,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -457,6 +547,8 @@ inputs: - /usr/local/var/spool/zeek/pe.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -467,6 +559,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -477,6 +571,8 @@ inputs: - /usr/local/var/spool/zeek/radius.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -487,6 +583,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -497,6 +595,8 @@ inputs: - /usr/local/var/spool/zeek/rdp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -507,6 +607,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -517,6 +619,8 @@ inputs: - /usr/local/var/spool/zeek/rfb.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -527,6 +631,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -537,6 +643,8 @@ inputs: - /usr/local/var/spool/zeek/signature.log 
prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -547,6 +655,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -557,6 +667,8 @@ inputs: - /usr/local/var/spool/zeek/sip.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -567,6 +679,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -577,6 +691,8 @@ inputs: - /usr/local/var/spool/zeek/smb_cmd.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -587,6 +703,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -597,6 +715,8 @@ inputs: - /usr/local/var/spool/zeek/smb_files.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -607,6 +727,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -617,6 +739,8 @@ inputs: - /usr/local/var/spool/zeek/smb_mapping.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -627,6 +751,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -637,6 +763,8 @@ inputs: - /usr/local/var/spool/zeek/smtp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -647,6 +775,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -657,6 +787,8 @@ inputs: - /usr/local/var/spool/zeek/snmp.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -667,6 +799,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -677,6 +811,8 @@ inputs: - /usr/local/var/spool/zeek/socks.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -687,6 +823,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -697,6 +835,8 @@ inputs: - /usr/local/var/spool/zeek/software.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -707,6 +847,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -717,6 +859,8 @@ inputs: - /usr/local/var/spool/zeek/ssh.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -727,6 +871,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -737,6 +883,8 @@ inputs: - /usr/local/var/spool/zeek/ssl.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -747,6 +895,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -757,6 +907,8 @@ inputs: - /usr/local/var/spool/zeek/stats.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -767,6 +919,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -777,6 +931,8 @@ inputs: - /usr/local/var/spool/zeek/syslog.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -787,6 +943,8 @@ inputs: type: logs exclude_files: 
- .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -797,6 +955,8 @@ inputs: - /usr/local/var/spool/zeek/traceroute.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -807,6 +967,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -817,6 +979,8 @@ inputs: - /usr/local/var/spool/zeek/tunnel.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -827,6 +991,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -837,6 +1003,8 @@ inputs: - /usr/local/var/spool/zeek/weird.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded @@ -847,6 +1015,8 @@ inputs: type: logs exclude_files: - .gz$ + file_identity: + fingerprint: null parsers: - container: format: auto @@ -857,6 +1027,8 @@ inputs: - /usr/local/var/spool/zeek/x509.log prospector: scanner: + fingerprint: + enabled: true symlinks: true tags: - forwarded
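Nearly every filestream hunk in this patch applies the same pair of settings: `file_identity: fingerprint` switches the input's registry keying from device+inode to a content fingerprint, and `prospector.scanner.fingerprint.enabled: true` tells the scanner to compute that fingerprint; the two travel together, since the identity consumes what the scanner produces. The generated templates spell the first setting as `fingerprint: null`, i.e. fingerprint identity with default options. Below is a minimal consolidated sketch of the resulting stream shape; the `filestream-example` name and the `example.log` dataset are placeholders for illustration and are not part of the patch:

      - name: filestream-example
        id: filestream-example-${kubernetes.hints.container_id}
        type: filestream
        use_output: default
        streams:
          - data_stream:
              dataset: example.log
              type: logs
            exclude_files:
              - .gz$
            # Key registry entries on a hash of each file's leading bytes
            # rather than on device+inode, which container log rotation
            # can recycle across files.
            file_identity:
              fingerprint: null
            parsers:
              - container:
                  format: auto
                  stream: all
            paths:
              - /var/log/containers/*${kubernetes.hints.container_id}.log
            # The scanner must produce the fingerprint that file_identity
            # consumes; the templates enable it with its default settings.
            prospector:
              scanner:
                fingerprint:
                  enabled: true
                symlinks: true

The other recurring edits in this file set follow from regeneration rather than hand edits: inputs that appear deleted and re-added are reorderings of the generated list, the synthetics monitors gain `max_attempts: 2` and drop the `add_fields` processor that set `monitor.fleet_managed`, and the panw streams add a `copy_fields` processor that copies `@timestamp` into `event.created` ahead of the syslog parser.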