exporters:
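  ## Ship logs over OTLP/HTTP to the in-cluster service addressed by the
  ## LOGS_METADATA_SVC and NAMESPACE environment variables, compressed with zstd.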
  otlphttp:
    endpoint: http://${LOGS_METADATA_SVC}.${NAMESPACE}.svc.cluster.local.:4318
    compression: zstd
    sending_queue:
      queue_size: 10
extensions:
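  ## file_storage is used by the filelog receiver below (`storage: file_storage`) to persist
  ## read checkpoints, so log tailing resumes where it left off after a collector restart.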
  file_storage:
    compaction:
      directory: /var/lib/storage/otc
      on_rebound: true
    directory: /var/lib/storage/otc
    timeout: 10s
  health_check: {}
  pprof: {}
processors:
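  ## Flush a batch once 1000 records have accumulated or 1s has passed, whichever comes first;
  ## a single batch is never larger than 2000 records.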
  batch:
    send_batch_max_size: 2000
    send_batch_size: 1000
    timeout: 1s
{{- if .Values.sumologic.logs.systemd.enabled }}
  ## copy _SYSTEMD_UNIT, SYSLOG_FACILITY, _HOSTNAME and PRIORITY from body to attributes
  ## so they can be used by the metadata processors the same way as with fluentd
  ## build the fluent.tag attribute as `host.{_SYSTEMD_UNIT}`
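  ## For example, a record with _SYSTEMD_UNIT=docker.service gets attributes["fluent.tag"] set to "host.docker.service"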
  logstransform/systemd:
    operators:
      - from: body._SYSTEMD_UNIT
        to: attributes._SYSTEMD_UNIT
        type: copy
      - from: body.SYSLOG_FACILITY
        to: attributes.SYSLOG_FACILITY
        type: copy
      - from: body._HOSTNAME
        to: attributes._HOSTNAME
        type: copy
      - from: body.PRIORITY
        to: attributes.PRIORITY
        type: copy
      - field: attributes["fluent.tag"]
        type: add
        value: EXPR("host." + attributes["_SYSTEMD_UNIT"])
      - field: body.__CURSOR
        type: remove
      - field: body.__MONOTONIC_TIMESTAMP
        type: remove
{{- end }}
receivers:
{{- if .Values.sumologic.logs.container.enabled }}
  filelog/containers:
{{ if lt (int .Capabilities.KubeVersion.Minor) 24 }}
    ## sets fingerprint_size to 17kB in order to match the longest possible docker line (which by default is 16kB)
    ## we want to include the timestamp, which is at the end of the line
    ## Not necessary in 1.24 and later, as the docker-shim is no longer present
    fingerprint_size: 17408
{{ end }}
    include:
      - /var/log/pods/*/*/*.log
    include_file_name: false
    include_file_path: true
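    ## include_file_path stores each record's source file as the `log.file.path` attribute,
    ## which extract-metadata-from-filepath below parses into pod metadata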
    operators:
      ## Detect the container runtime log format
      ## Can be: docker-shim, CRI-O and containerd
      - id: get-format
        routes:
          - expr: 'body matches "^\\{"'
            output: parser-docker
          - expr: 'body matches "^[^ Z]+ "'
            output: parser-crio
          - expr: 'body matches "^[^ Z]+Z"'
            output: parser-containerd
        type: router
      ## Parse CRI-O format
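      ## Example CRI-O line (illustrative): '2021-11-25T09:59:13.123456789+01:00 stdout F 2001-02-03 04:05:06 first line'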
      - id: parser-crio
        output: merge-cri-lines
        parse_to: body
        regex: '^(?P<time>[^ Z]+) (?P<stream>stdout|stderr) (?P<logtag>[^ ]*)( |)(?P<log>.*)$'
        timestamp:
          layout: '2006-01-02T15:04:05.000000000-07:00'
          layout_type: gotime
          parse_from: body.time
        type: regex_parser
      ## Parse CRI-Containerd format
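      ## Example containerd line (illustrative): '2021-11-25T09:59:13.123456789Z stdout F 2001-02-03 04:05:06 first line'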
      - id: parser-containerd
        output: merge-cri-lines
        parse_to: body
        regex: '^(?P<time>[^ ^Z]+Z) (?P<stream>stdout|stderr) (?P<logtag>[^ ]*)( |)(?P<log>.*)$'
        timestamp:
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
          parse_from: body.time
        type: regex_parser
      ## Parse docker-shim format
      ## parser-docker interprets the input string as JSON and moves the `time` field from the JSON to the Timestamp field in the OTLP log record.
      ## Input Body (string): '{"log":"2001-02-03 04:05:06 first line\n","stream":"stdout","time":"2021-11-25T09:59:13.23887954Z"}'
      ## Output Body (JSON): { "log": "2001-02-03 04:05:06 first line\n", "stream": "stdout" }
      ## Input Timestamp: _empty_
      ## Output Timestamp: 2021-11-25 09:59:13.23887954 +0000 UTC
      - id: parser-docker
        output: merge-docker-lines
        parse_to: body
        timestamp:
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
          parse_from: body.time
        type: json_parser
      ## merge-docker-lines stitches back together log lines split by the Docker logging driver.
      ## Input Body (JSON): { "log": "2001-02-03 04:05:06 very long li", "stream": "stdout" }
      ## Input Body (JSON): { "log": "ne that was split by the logging driver\n", "stream": "stdout" }
      ## Output Body (JSON): { "log": "2001-02-03 04:05:06 very long line that was split by the logging driver\n", "stream": "stdout" }
      - id: merge-docker-lines
        combine_field: body.log
        combine_with: ""
        is_last_entry: body.log matches "\n$"
        output: strip-trailing-newline
        source_identifier: attributes["log.file.path"]
        type: recombine
      ## merge-cri-lines stitches back together log lines split by the CRI logging drivers.
      ## Input Body (JSON): { "log": "2001-02-03 04:05:06 very long li", "logtag": "P" }
      ## Input Body (JSON): { "log": "ne that was split by the logging driver", "logtag": "F" }
      ## Output Body (JSON): { "log": "2001-02-03 04:05:06 very long line that was split by the logging driver", "logtag": "F" }
      - id: merge-cri-lines
        combine_field: body.log
        combine_with: ""
        is_last_entry: body.logtag == "F"
        output: {{ .Values.sumologic.logs.multiline.enabled | ternary "merge-multiline-logs" "extract-metadata-from-filepath" }}
        overwrite_with: newest
        source_identifier: attributes["log.file.path"]
        type: recombine
      ## strip-trailing-newline removes the trailing "\n" from the `log` key. This is required for logs coming from the Docker container runtime.
      ## Input Body (JSON): { "log": "2001-02-03 04:05:06 very long line that was split by the logging driver\n", "stream": "stdout" }
      ## Output Body (JSON): { "log": "2001-02-03 04:05:06 very long line that was split by the logging driver", "stream": "stdout" }
      - id: strip-trailing-newline
        output: {{ .Values.sumologic.logs.multiline.enabled | ternary "merge-multiline-logs" "extract-metadata-from-filepath" }}
        parse_from: body.log
        parse_to: body
        regex: "^(?P<log>.*)\n$"
        type: regex_parser
{{- if .Values.sumologic.logs.multiline.enabled }}
      ## merge-multiline-logs merges incoming log records into multiline logs.
      ## Input Body (JSON): { "log": "2001-02-03 04:05:06 first line\n", "stream": "stdout" }
      ## Input Body (JSON): { "log": " second line\n", "stream": "stdout" }
      ## Input Body (JSON): { "log": " third line\n", "stream": "stdout" }
      ## Output Body (JSON): { "log": "2001-02-03 04:05:06 first line\n second line\n third line\n", "stream": "stdout" }
      - id: merge-multiline-logs
        combine_field: body.log
        combine_with: "\n"
        is_first_entry: body.log matches {{ .Values.sumologic.logs.multiline.first_line_regex | quote }}
        output: extract-metadata-from-filepath
        source_identifier: attributes["log.file.path"]
        type: recombine
{{- end }}
      ## extract-metadata-from-filepath extracts data from the `log.file.path` Attribute into the Attributes
      ## Input Attributes:
      ## - log.file.path: '/var/log/pods/default_logger-multiline-4nvg4_aed49747-b541-4a07-8663-f7e1febc47d5/loggercontainer/0.log'
      ## Output Attributes:
      ## - log.file.path: '/var/log/pods/default_logger-multiline-4nvg4_aed49747-b541-4a07-8663-f7e1febc47d5/loggercontainer/0.log'
      ## - container_name: "loggercontainer",
      ## - namespace: "default",
      ## - pod_name: "logger-multiline-4nvg4",
      ## - run_id: "0",
      ## - uid: "aed49747-b541-4a07-8663-f7e1febc47d5"
      - id: extract-metadata-from-filepath
        parse_from: attributes["log.file.path"]
        regex: '^.*\/(?P<namespace>[^_]+)_(?P<pod_name>[^_]+)_(?P<uid>[a-f0-9\-]+)\/(?P<container_name>[^\._]+)\/(?P<run_id>\d+)\.log$'
        type: regex_parser
      ## The following actions are being performed:
      ## - renaming attributes
      ## - moving stream from body to attributes
      ## - using body.log as body
      ## Input Body (JSON): {
      ##   "log": "2001-02-03 04:05:06 loggerlog 1 first line\n",
      ##   "stream": "stdout",
      ## }
      ## Output Body (String): "2001-02-03 04:05:06 loggerlog 1 first line\n"
      ## Input Attributes:
      ## - log.file.path: '/var/log/pods/default_logger-multiline-4nvg4_aed49747-b541-4a07-8663-f7e1febc47d5/loggercontainer/0.log'
      ## - container_name: "loggercontainer",
      ## - namespace: "default",
      ## - pod_name: "logger-multiline-4nvg4",
      ## - run_id: "0",
      ## - uid: "aed49747-b541-4a07-8663-f7e1febc47d5"
      ## Output Attributes:
      ## - k8s.container.name: "loggercontainer"
      ## - k8s.namespace.name: "default"
      ## - k8s.pod.name: "logger-multiline-4nvg4"
      ## - stream: "stdout"
      - id: move-attributes
        from: body.stream
        to: attributes["stream"]
        type: move
      - from: attributes.container_name
        to: attributes["k8s.container.name"]
        type: move
      - from: attributes.namespace
        to: attributes["k8s.namespace.name"]
        type: move
      - from: attributes.pod_name
        to: attributes["k8s.pod.name"]
        type: move
      - field: attributes.run_id
        type: remove
      - field: attributes.uid
        type: remove
      - field: attributes["log.file.path"]
        type: remove
      - from: body.log
        to: body
        type: move
    storage: file_storage
{{- end }}
{{- if .Values.sumologic.logs.systemd.enabled }}
  journald:
    directory: /var/log/journal
    ## This is not a full equivalent of fluent-bit filtering, as fluent-bit filters by `_SYSTEMD_UNIT`
    ## while here we filter by `UNIT`
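    ## The default unit list below can be overridden from values.yaml, for example (illustrative):
    ##   sumologic:
    ##     logs:
    ##       systemd:
    ##         units:
    ##           - docker.service
    ##           - kubelet.service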
    units:
{{- if .Values.sumologic.logs.systemd.units }}
      {{ toYaml .Values.sumologic.logs.systemd.units | nindent 6 }}
{{- else }}
      - addon-config.service
      - addon-run.service
      - cfn-etcd-environment.service
      - cfn-signal.service
      - clean-ca-certificates.service
      - containerd.service
      - coreos-metadata.service
      - coreos-setup-environment.service
      - coreos-tmpfiles.service
      - dbus.service
      - docker.service
      - efs.service
      - etcd-member.service
      - etcd.service
      - etcd2.service
      - etcd3.service
      - etcdadm-check.service
      - etcdadm-reconfigure.service
      - etcdadm-save.service
      - etcdadm-update-status.service
      - flanneld.service
      - format-etcd2-volume.service
      - kube-node-taint-and-uncordon.service
      - kubelet.service
      - ldconfig.service
      - locksmithd.service
      - logrotate.service
      - lvm2-monitor.service
      - mdmon.service
      - nfs-idmapd.service
      - nfs-mountd.service
      - nfs-server.service
      - nfs-utils.service
      - node-problem-detector.service
      - ntp.service
      - oem-cloudinit.service
      - rkt-gc.service
      - rkt-metadata.service
      - rpc-idmapd.service
      - rpc-mountd.service
      - rpc-statd.service
      - rpcbind.service
      - set-aws-environment.service
      - system-cloudinit.service
      - systemd-timesyncd.service
      - update-ca-certificates.service
      - user-cloudinit.service
      - var-lib-etcd2.service
{{- end }}
{{- end }}
service:
  extensions:
    - health_check
    - file_storage
    - pprof
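  ## Each pipeline sends data from its receivers through its processors to the otlphttp exporter:
  ##   logs/containers: filelog/containers -> batch -> otlphttp
  ##   logs/systemd:    journald -> logstransform/systemd -> batch -> otlphttp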
  pipelines:
{{- if .Values.sumologic.logs.container.enabled }}
    logs/containers:
      exporters:
        - otlphttp
      processors:
        - batch
      receivers:
        - filelog/containers
{{- end }}
{{- if .Values.sumologic.logs.systemd.enabled }}
    logs/systemd:
      exporters:
        - otlphttp
      processors:
        - logstransform/systemd
        - batch
      receivers:
        - journald
{{- end }}
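  ## Verbosity of the collector's own logs, taken from .Values.otellogs.logLevel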
  telemetry:
    logs:
      level: {{ .Values.otellogs.logLevel | quote }}