# fluentd.yaml
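# Two Flux (Helm operator) HelmReleases for log shipping in the "logs" namespace:
# a fluentd-elasticsearch release that ships container logs to Elasticsearch, and
# a fluentd-syslog release that receives syslog traffic and forwards it to the
# same Elasticsearch. The flux.weave.works annotations enable automated image
# updates for tags matching the given globs.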
---
apiVersion: flux.weave.works/v1beta1
kind: HelmRelease
metadata:
  name: fluentd
  namespace: logs
  annotations:
    flux.weave.works/automated: "true"
    flux.weave.works/tag.chart-image: glob:v2.*.*
spec:
  releaseName: fluentd
  chart:
    repository: https://kiwigrid.github.io/
    name: fluentd-elasticsearch
    version: 4.5.0
  values:
    image:
      repository: quay.io/fluentd_elasticsearch/fluentd
      tag: v2.7.0
    elasticsearch:
      host: 'elasticsearch-master.logs'
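    # Tolerate the etcd and controlplane node taints so the fluentd pods are
    # also scheduled on those nodes and their logs get collected.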
    tolerations:
      - key: "node-role.kubernetes.io/etcd"
        operator: "Exists"
      - key: "node-role.kubernetes.io/controlplane"
        operator: "Exists"
    # tolerations:
    #   - effect: NoExecute
    #     key: "node-role.kubernetes.io/etcd"
    #     value: "true"
    #   - effect: NoSchedule
    #     key: "node-role.kubernetes.io/controlplane"
    #     value: "true"
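    # output.conf below overrides the chart's default Elasticsearch output.
    # The "#{ENV['...']}" placeholders are resolved by fluentd at runtime from
    # environment variables, which the chart is expected to set (OUTPUT_HOST,
    # OUTPUT_PORT, etc.) from the elasticsearch values above.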
    configMaps:
      output.conf: |-
        <match **>
          @id elasticsearch
          @type elasticsearch
          @log_level error
          include_tag_key true
          type_name _doc
          host "#{ENV['OUTPUT_HOST']}"
          port "#{ENV['OUTPUT_PORT']}"
          scheme "#{ENV['OUTPUT_SCHEME']}"
          ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
          ssl_verify true
          user "#{ENV['OUTPUT_USER']}"
          password "#{ENV['OUTPUT_PASSWORD']}"
          logstash_format true
          logstash_prefix "#{ENV['LOGSTASH_PREFIX']}"
          reconnect_on_error true
          verify_es_version_at_startup true
          default_elasticsearch_version 7
          <buffer>
            @type file
            path /var/log/fluentd-buffers/kubernetes.system.buffer
            flush_mode interval
            retry_type exponential_backoff
            flush_thread_count 2
            flush_interval 5s
            retry_forever
            retry_max_interval 30
            chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
            queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
            overflow_action block
          </buffer>
        </match>
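    # The file-backed buffer above flushes every 5s, retries forever with
    # exponential backoff (capped at 30s), and blocks the input
    # (overflow_action block) once the queue limit is reached.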
---
apiVersion: flux.weave.works/v1beta1
kind: HelmRelease
metadata:
  name: fluentd-syslog
  namespace: logs
  annotations:
    flux.weave.works/automated: "true"
    flux.weave.works/tag.chart-image: glob:v2.*
spec:
  releaseName: fluentd-syslog
  chart:
    repository: https://kubernetes-charts.storage.googleapis.com/
    name: fluentd
    version: 1.10.1
  values:
    image:
      repository: gcr.io/google-containers/fluentd-elasticsearch
      tag: v2.4.0
    output:
      host: elasticsearch-master.logs
      port: 9200
    service:
      type: LoadBalancer
      externalPort: 80
      ports:
        - name: "syslog"
          protocol: UDP
          containerPort: 5140
    persistence:
      enabled: true
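    # The configMaps below override the chart's default fluentd config:
    # a syslog source on UDP 5140 (reachable through the LoadBalancer Service
    # above), a forward input, health-check and monitoring endpoints, and an
    # Elasticsearch output for the syslog records.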
    configMaps:
      general.conf: |
        # Prevent fluentd from handling records containing its own logs. Otherwise
        # it can lead to an infinite loop, where an error in sending one message
        # generates another message which also fails to be sent, and so on.
        <match fluentd.**>
          @type null
        </match>
        # Used for health checking
        <source>
          @type http
          port 9880
          bind 0.0.0.0
        </source>
        # Emits internal metrics every minute and also exposes them on port
        # 24220. Useful for determining if an output plugin is retrying/erroring,
        # or for determining the buffer queue length.
        <source>
          @type monitor_agent
          bind 0.0.0.0
          port 24220
          tag fluentd.monitor.metrics
        </source>
        # syslog
        <source>
          @type syslog
          port 5140
          bind 0.0.0.0
          tag system
        </source>
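      # Records from the syslog source are tagged "system" and picked up by the
      # <match system.**> block in output.conf below, which indexes them in
      # Elasticsearch under the fluentd-syslog logstash prefix.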
      system.conf: |-
        <system>
          root_dir /tmp/fluentd-buffers/
        </system>
      forward-input.conf: |
        <source>
          @type forward
          port 24224
          bind 0.0.0.0
        </source>
      output.conf: |
        <match system.**>
          @type elasticsearch
          host "#{ENV['OUTPUT_HOST']}"
          port "#{ENV['OUTPUT_PORT']}"
          scheme "#{ENV['OUTPUT_SCHEME']}"
          ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
          logstash_format true
          logstash_prefix fluentd-syslog
          <buffer>
            @type file
            path /var/log/fluentd-buffers/kubernetes.system.buffer
            flush_mode interval
            retry_type exponential_backoff
            flush_thread_count 2
            flush_interval 5s
            retry_forever
            retry_max_interval 30
            chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
            queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
            overflow_action block
          </buffer>
        </match>
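      # The forward input above (port 24224) additionally accepts fluentd
      # forward-protocol traffic, e.g. from other fluentd/fluent-bit agents or
      # from Docker's fluentd logging driver.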