Merge "Fluent-logging: Remove utils for generating configuration files"

Zuul 2019-05-24 03:08:03 +00:00 committed by Gerrit Code Review
commit ff2d62c9bf
6 changed files with 336 additions and 1000 deletions


@@ -18,73 +18,6 @@ limitations under the License.
set -ex
# Test whether indexes have been created for each Elasticsearch output defined
function check_output_indexes_exist () {
{{/*
First, determine the sum of Fluentbit and Fluentd's flush intervals. This
ensures we wait long enough for recorded events to be indexed
*/}}
{{ $fluentBitConf := first .Values.conf.fluentbit }}
{{ $fluentBitServiceConf := index $fluentBitConf "service" }}
{{ $fluentBitFlush := index $fluentBitServiceConf "Flush" }}
fluentBitFlush={{$fluentBitFlush}}
{{/*
The generic Elasticsearch output should always be last, and the flush intervals
for all Elasticsearch outputs should match. This means we can safely use the
last item in Fluentd's configuration to get the Fluentd output flush interval
*/}}
{{- $fluentdConf := last .Values.conf.fluentd -}}
{{- $fluentdElasticsearchConf := index $fluentdConf "elasticsearch" -}}
{{- $fluentdFlush := index $fluentdElasticsearchConf "flush_interval" -}}
fluentdFlush={{$fluentdFlush}}
totalFlush=$(($fluentBitFlush + $fluentdFlush))
sleep $totalFlush
{{/*
Iterate over Fluentd's config and for each Elasticsearch output, determine
the logstash index prefix and check Elasticsearch for that index
*/}}
{{ range $key, $config := .Values.conf.fluentd -}}
{{/* Get list of keys to determine config header to index on */}}
{{- $keyList := keys $config -}}
{{- $configSection := first $keyList -}}
{{/* Index config section dictionary */}}
{{- $configEntry := index $config $configSection -}}
{{- if hasKey $configEntry "type" -}}
{{- $type := index $configEntry "type" -}}
{{- if eq $type "elasticsearch" -}}
{{- if hasKey $configEntry "logstash_prefix" -}}
{{- $logstashPrefix := index $configEntry "logstash_prefix" }}
{{$logstashPrefix}}_total_hits=$(curl -K- <<< "--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" \
-XGET "${ELASTICSEARCH_ENDPOINT}/{{$logstashPrefix}}-*/_search?pretty" -H 'Content-Type: application/json' \
| python -c "import sys, json; print json.load(sys.stdin)['hits']['total']")
if [ "${{$logstashPrefix}}_total_hits" -gt 0 ]; then
echo "PASS: Successful hits on {{$logstashPrefix}}-* index!"
else
echo "FAIL: No hits on query for {{$logstashPrefix}}-* index! Exiting";
exit 1;
fi
{{ else }}
logstash_total_hits=$(curl -K- <<< "--user ${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" \
-XGET "${ELASTICSEARCH_ENDPOINT}/logstash-*/_search?pretty" -H 'Content-Type: application/json' \
| python -c "import sys, json; print json.load(sys.stdin)['hits']['total']")
if [ "$logstash_total_hits" -gt 0 ]; then
echo "PASS: Successful hits on logstash-* index!"
else
echo "FAIL: No hits on query for logstash-* index! Exiting";
exit 1;
fi
{{ end }}
{{- end }}
{{- end }}
{{- end -}}
}
{{ if and (.Values.manifests.job_elasticsearch_template) (not (empty .Values.conf.templates)) }}
# Tests whether fluent-logging has successfully generated the elasticsearch index mapping
# templates defined by values.yaml
@@ -106,4 +39,3 @@ function check_templates () {
{{ if and (.Values.manifests.job_elasticsearch_template) (not (empty .Values.conf.templates)) }}
check_templates
{{ end }}
check_output_indexes_exist
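
The removed check boils down to one count query per index prefix. A minimal manual equivalent, assuming the same ELASTICSEARCH_* environment variables, an Elasticsearch version where hits.total is still a plain integer, and a jq binary standing in for the inline python2 one-liner (none of which the chart guarantees):

# Hypothetical manual spot-check for one prefix, e.g. kernel:
curl -s --user "${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" \
  -XGET "${ELASTICSEARCH_ENDPOINT}/kernel-*/_search" \
  -H 'Content-Type: application/json' | jq '.hits.total'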


@@ -23,9 +23,9 @@ metadata:
name: fluent-logging-etc
type: Opaque
data:
fluent-bit.conf: {{ include "fluent_logging.utils.to_fluentbit_conf" .Values.conf.fluentbit | b64enc }}
parsers.conf: {{ include "fluent_logging.utils.to_fluentbit_conf" .Values.conf.parsers | b64enc }}
fluent.conf: {{ include "fluent_logging.utils.to_fluentd_conf" .Values.conf.fluentd | b64enc }}
fluent-bit.conf: {{ .Values.conf.fluentbit.template | b64enc }}
parsers.conf: {{ .Values.conf.parsers.template | b64enc }}
fluent.conf: {{ .Values.conf.fluentd.template | b64enc }}
{{ range $template, $fields := .Values.conf.templates }}
{{ $template }}.json: {{ toJson $fields | b64enc }}
{{ end }}
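
With the generation helpers gone, these keys now carry the template strings from values.yaml verbatim. One way to confirm what a deployed release actually mounts is to decode the Secret directly; a sketch assuming kubectl access to the release namespace:

# The key name contains a dot, which must be escaped in the jsonpath.
kubectl get secret fluent-logging-etc \
  -o jsonpath='{.data.fluent\.conf}' | base64 -d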


@@ -1,44 +0,0 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
# This function generates fluentbit configuration files from entries in the
# fluent-logging values.yaml. It results in a configuration section with the
# following format (for as many key/value pairs as are defined in values for
# a section):
# [HEADER]
# key value
# key value
# key value
# The configuration schema can be found here:
# http://fluentbit.io/documentation/0.12/configuration/schema.html
{{- define "fluent_logging.utils.to_fluentbit_conf" -}}
{{- range $values := . -}}
{{- range $section := . -}}
{{- $header := pick . "header" -}}
{{- $config := omit . "header" }}
[{{$header.header | upper }}]
{{range $key, $value := $config -}}
{{ if eq $key "Rename" }}
{{- range $original, $new := $value -}}
{{ printf "Rename %s %s" $original $new | indent 4 }}
{{end}}
{{- else -}}
{{ $key | indent 4 }} {{ $value }}
{{end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
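
For illustration, a reconstruction of what this removed helper produced from the service entry that appears in the values.yaml diff below (header uppercased, keys emitted at four-space indent):

  - service:
      header: service
      Flush: 30
      Log_Level: info

rendered as:

  [SERVICE]
      Flush 30
      Log_Level info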


@@ -1,90 +0,0 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
# This function generates fluentd configuration files from entries in the
# fluent-logging values.yaml. It results in a configuration section in either
# of the following formats (for as many key/value pairs as are defined in
# values for a section):
# <HEADER>
# key value
# key value
# key value
# </HEADER>
# or
# <HEADER>
# key value
# <INNER_HEADER>
# key value
# </INNER_HEADER>
# </HEADER>
# The configuration schema can be found here:
# https://docs.fluentd.org/v0.12/articles/config-file
{{- define "fluent_logging.utils.to_fluentd_conf" -}}
{{- range $values := . -}}
{{- range $section := . -}}
{{- $header := pick . "header" -}}
{{- $config := omit . "header" "expression" -}}
{{- if hasKey . "expression" -}}
{{ $regex := pick . "expression" }}
{{ printf "<%s %s>" $header.header $regex.expression }}
{{- else }}
{{ printf "<%s>" $header.header }}
{{- end }}
{{- range $key, $value := $config -}}
{{- if kindIs "slice" $value }}
{{- range $value := . -}}
{{- range $innerSection := . -}}
{{- $innerHeader := pick . "header" -}}
{{- $innerConfig := omit . "header" "expression" -}}
{{- if hasKey . "expression" -}}
{{ $innerRegex := pick . "expression" }}
{{ printf "<%s %s>" $innerHeader.header $innerRegex.expression | indent 2 }}
{{- else }}
{{ printf "<%s>" $innerHeader.header | indent 2 }}
{{- end }}
{{- range $innerKey, $innerValue := $innerConfig -}}
{{- if eq $innerKey "type" -}}
{{ $type := list "@" "type" | join "" }}
{{ $type | indent 4 }} {{ $innerValue }}
{{- else if contains "ENV" ($innerValue | quote) }}
{{ $innerKey | indent 4 }} {{ $innerValue | quote }}
{{- else if eq $innerKey "flush_interval" }}
{{ $innerKey | indent 4 }} {{ printf "%ss" $innerValue }}
{{- else }}
{{ $innerKey | indent 4 }} {{ $innerValue }}
{{- end }}
{{- end }}
{{ printf "</%s>" $innerHeader.header | indent 2 }}
{{- end -}}
{{ end -}}
{{- else }}
{{- if eq $key "type" -}}
{{ $type := list "@" "type" | join "" }}
{{ $type | indent 2 }} {{ $value }}
{{- else if contains "ENV" ($value | quote) }}
{{ $key | indent 2 }} {{ $value | quote }}
{{- else if eq $key "flush_interval" }}
{{ $key | indent 2 }} {{ printf "%ss" $value }}
{{- else }}
{{ $key | indent 2 }} {{ $value }}
{{- end -}}
{{- end -}}
{{- end }}
{{ printf "</%s>" $header.header }}
{{- end }}
{{ end -}}
{{- end -}}
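
As with the fluentbit helper, a reconstruction of one rendering: the metrics_agent source entry from the values.yaml diff below. Map keys are emitted in sorted order, and type is rewritten to @type:

  - metrics_agent:
      header: source
      type: monitor_agent
      bind: 0.0.0.0
      port: 24220

rendered as:

  <source>
    bind 0.0.0.0
    port 24220
    @type monitor_agent
  </source>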


@@ -103,347 +103,340 @@ dependencies:
service: fluentd
conf:
fluentbit:
- service:
header: service
Flush: 30
Daemon: Off
Log_Level: info
Parsers_File: parsers.conf
- kernel_messages:
header: input
Name: tail
Tag: kernel
Path: /var/log/kern.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- containers_tail:
header: input
Name: tail
Tag: kube.*
Path: /var/log/containers/*.log
Parser: docker
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- libvirt:
header: input
Name: tail
Tag: libvirt
Path: /var/log/libvirt/libvirtd.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- qemu:
header: input
Name: tail
Tag: qemu
Path: /var/log/libvirt/qemu/*.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- kubelet:
header: input
Name: systemd
Tag: journal.*
Path: ${JOURNAL_PATH}
Systemd_Filter: _SYSTEMD_UNIT=kubelet.service
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- docker_daemon:
header: input
Name: systemd
Tag: journal.*
Path: ${JOURNAL_PATH}
Systemd_Filter: _SYSTEMD_UNIT=docker.service
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- throttle_filter:
header: filter
Name: throttle
Match: "**"
Rate: 1000
Window: 300
Interval: 1s
- libvirt_record_modifier:
header: filter
Name: record_modifier
Match: libvirt
Record: hostname ${HOSTNAME}
- qemu_record_modifier:
header: filter
Name: record_modifier
Match: qemu
Record: hostname ${HOSTNAME}
- kernel_record_modifier:
header: filter
Name: record_modifier
Match: kernel
Record: hostname ${HOSTNAME}
- systemd_modify_fields:
header: filter
Name: modify
Match: journal.**
Rename:
_BOOT_ID: BOOT_ID
_CAP_EFFECTIVE: CAP_EFFECTIVE
_CMDLINE: CMDLINE
_COMM: COMM
_EXE: EXE
_GID: GID
_HOSTNAME: HOSTNAME
_MACHINE_ID: MACHINE_ID
_PID: PID
_SYSTEMD_CGROUP: SYSTEMD_CGROUP
_SYSTEMD_SLICE: SYSTEMD_SLICE
_SYSTEMD_UNIT: SYSTEMD_UNIT
_UID: UID
_TRANSPORT: TRANSPORT
- drop_fluentd_logs:
header: output
Name: "null"
Match: "**.fluentd**"
- kube_filter:
header: filter
Name: kubernetes
Match: kube.*
Merge_JSON_Log: On
- fluentd_output:
header: output
Name: forward
Match: "*"
Host: ${FLUENTD_HOST}
Port: ${FLUENTD_PORT}
parsers:
- docker:
header: parser
Name: docker
Format: json
Time_Key: time
Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
Time_Keep: On
Decode_Field_As: escaped_utf8 log
fluentd:
- metrics_agent:
header: source
type: monitor_agent
bind: 0.0.0.0
port: 24220
- fluentbit_forward:
header: source
type: forward
port: "#{ENV['FLUENTD_PORT']}"
bind: 0.0.0.0
- filter_fluentd_logs:
header: match
expression: "fluent.**"
type: "null"
# NOTE(srwilkers): Look for specific keywords in the log key to determine
# log level of event
- tag_kubernetes_log_level:
header: match
type: rewrite_tag_filter
expression: "kube.var.log.containers.**.log"
rule:
-
- header: rule
key: log
pattern: /info/i
tag: info.${tag}
- header: rule
key: log
pattern: /warn/i
tag: warn.${tag}
- header: rule
key: log
pattern: /error/i
tag: error.${tag}
- header: rule
key: log
pattern: /critical/i
tag: critical.${tag}
- header: rule
key: log
pattern: (.+)
tag: info.${tag}
# NOTE(srwilkers): Create new key for log level, and use the tag prefix
# added previously
- add_kubernetes_log_level_and_application_key:
header: filter
type: record_transformer
enable_ruby: true
expression: "**.kube.var.log.containers.**.log"
record:
-
- header: record
level: ${tag_parts[0]}
application: ${record["kubernetes"]["labels"]["application"]}
- add_openstack_application_key:
header: filter
type: record_transformer
expression: "openstack.**"
record:
-
- header: record
application: ${tag_parts[1]}
#NOTE(srwilkers): This prefixes the tag for oslo.log entries from the
# fluent handler/formatter with the log level, allowing for lookups on
# openstack logs with a particular log level (ie: error.openstack.keystone)
- tag_openstack_log_level:
header: match
type: rewrite_tag_filter
expression: "openstack.**"
rule:
-
- header: rule
key: level
pattern: INFO
tag: info.${tag}
- header: rule
key: level
pattern: WARN
tag: warn.${tag}
- header: rule
key: level
pattern: ERROR
tag: error.${tag}
- header: rule
key: level
pattern: CRITICAL
tag: critical.${tag}
#NOTE(tp6510): This prefixes the tag for auth entries
# it allows for lookups on openstack logs with
# a particular auth log (ie: auth.openstack.keystone)
- tag_auth_log:
header: match
type: rewrite_tag_filter
expression: "*.openstack.**"
rule:
-
- header: rule
key: application
pattern: keystone
tag: auth.${tag}
- header: rule
key: application
pattern: horizon
tag: auth.${tag}
- header: rule
key: application
pattern: mariadb
tag: auth.${tag}
- header: rule
key: application
pattern: memcached
tag: auth.${tag}
- header: rule
key: application
pattern: rabbitmq
tag: auth.${tag}
- libvirt_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "libvirt"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: libvirt
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- qemu_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "qemu"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: qemu
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- journal_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "journal.**"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: journal
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- kernel_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "kernel"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: kernel
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "**"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
type_name: fluent
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
flush_interval: "15"
template: |
<source>
bind 0.0.0.0
port 24220
@type monitor_agent
</source>
<source>
bind 0.0.0.0
port "#{ENV['FLUENTD_PORT']}"
@type forward
</source>
<match fluent.**>
@type null
</match>
<match kube.var.log.containers.**.log>
<rule>
key log
pattern /info/i
tag info.${tag}
</rule>
<rule>
key log
pattern /warn/i
tag warn.${tag}
</rule>
<rule>
key log
pattern /error/i
tag error.${tag}
</rule>
<rule>
key log
pattern /critical/i
tag critical.${tag}
</rule>
<rule>
key log
pattern (.+)
tag info.${tag}
</rule>
@type rewrite_tag_filter
</match>
<filter **.kube.var.log.containers.**.log>
enable_ruby true
<record>
application ${record["kubernetes"]["labels"]["application"]}
level ${tag_parts[0]}
</record>
@type record_transformer
</filter>
<filter openstack.**>
<record>
application ${tag_parts[1]}
</record>
@type record_transformer
</filter>
<match openstack.**>
<rule>
key level
pattern INFO
tag info.${tag}
</rule>
<rule>
key level
pattern WARN
tag warn.${tag}
</rule>
<rule>
key level
pattern ERROR
tag error.${tag}
</rule>
<rule>
key level
pattern CRITICAL
tag critical.${tag}
</rule>
@type rewrite_tag_filter
</match>
<match *.openstack.**>
<rule>
key application
pattern keystone
tag auth.${tag}
</rule>
<rule>
key application
pattern horizon
tag auth.${tag}
</rule>
<rule>
key application
pattern mariadb
tag auth.${tag}
</rule>
<rule>
key application
pattern memcached
tag auth.${tag}
</rule>
<rule>
key application
pattern rabbitmq
tag auth.${tag}
</rule>
@type rewrite_tag_filter
</match>
<match libvirt>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix libvirt
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match qemu>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix qemu
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match journal.**>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix journal
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match kernel>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
logstash_prefix kernel
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
<match **>
<buffer>
chunk_limit_size 8MB
flush_interval 15s
flush_thread_count 8
queue_limit_length 256
retry_forever false
retry_max_interval 30
</buffer>
flush_interval 15s
host "#{ENV['ELASTICSEARCH_HOST']}"
include_tag_key true
logstash_format true
password "#{ENV['ELASTICSEARCH_PASSWORD']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
@type elasticsearch
type_name fluent
user "#{ENV['ELASTICSEARCH_USERNAME']}"
</match>
fluentbit:
template: |
[SERVICE]
Daemon false
Flush 30
Log_Level info
Parsers_File parsers.conf
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/kern.log
Tag kernel
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Parser docker
Path /var/log/containers/*.log
Tag kube.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/libvirt/libvirtd.log
Tag libvirt
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name tail
Path /var/log/libvirt/qemu/*.log
Tag qemu
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name systemd
Path ${JOURNAL_PATH}
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Tag journal.*
[INPUT]
Buffer_Chunk_Size 1M
Buffer_Max_Size 1M
Mem_Buf_Limit 5MB
Name systemd
Path ${JOURNAL_PATH}
Systemd_Filter _SYSTEMD_UNIT=docker.service
Tag journal.*
[FILTER]
Interval 1s
Match **
Name throttle
Rate 1000
Window 300
[FILTER]
Match libvirt
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match qemu
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match kernel
Name record_modifier
Record hostname ${HOSTNAME}
[FILTER]
Match journal.**
Name modify
Rename _BOOT_ID BOOT_ID
Rename _CAP_EFFECTIVE CAP_EFFECTIVE
Rename _CMDLINE CMDLINE
Rename _COMM COMM
Rename _EXE EXE
Rename _GID GID
Rename _HOSTNAME HOSTNAME
Rename _MACHINE_ID MACHINE_ID
Rename _PID PID
Rename _SYSTEMD_CGROUP SYSTEMD_CGROUP
Rename _SYSTEMD_SLICE SYSTEMD_SLICE
Rename _SYSTEMD_UNIT SYSTEMD_UNIT
Rename _TRANSPORT TRANSPORT
Rename _UID UID
[OUTPUT]
Match **.fluentd**
Name null
[FILTER]
Match kube.*
Merge_JSON_Log true
Name kubernetes
[OUTPUT]
Host ${FLUENTD_HOST}
Match *
Name forward
Port ${FLUENTD_PORT}
parsers:
template: |
[PARSER]
Decode_Field_As escaped_utf8 log
Format json
Name docker
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep true
Time_Key time
fluentd_exporter:
log:
format: "logger:stdout?json=true"
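
Since the chart no longer assembles these files, deployers override the raw strings instead of structured lists. A minimal sketch of such a values override (keys as defined above, content illustrative):

conf:
  fluentd:
    template: |
      <source>
        bind 0.0.0.0
        port "#{ENV['FLUENTD_PORT']}"
        @type forward
      </source>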


@@ -369,461 +369,6 @@ data:
component: test
values:
release_uuid: ${RELEASE_UUID}
conf:
fluentbit:
- service:
header: service
Flush: 30
Daemon: Off
Log_Level: info
Parsers_File: parsers.conf
- ceph_cluster_logs:
header: input
Name: tail
Tag: ceph.cluster.*
Path: /var/log/ceph/ceph.log
Parsers: syslog
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- ceph_audit_logs:
header: input
Name: tail
Tag: ceph.audit.*
Path: /var/log/ceph/ceph.audit.log
Parsers: syslog
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- ceph_mon_logs:
header: input
Name: tail
Tag: ceph.mon.*
Path: /var/log/ceph/ceph-mon**.log
Parsers: syslog
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- ceph_osd_logs:
header: input
Name: tail
Tag: ceph.osd.*
Path: /var/log/ceph/ceph-osd**.log
Parsers: syslog
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- kernel_messages:
header: input
Name: tail
Tag: kernel
Path: /var/log/kern.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- kubelet:
header: input
Name: systemd
Tag: journal.*
Path: ${JOURNAL_PATH}
Systemd_Filter: _SYSTEMD_UNIT=kubelet.service
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- libvirt:
header: input
Name: tail
Tag: libvirt
Path: /var/log/libvirt/libvirtd.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- qemu:
header: input
Name: tail
Tag: qemu
Path: /var/log/libvirt/qemu/*.log
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- docker_daemon:
header: input
Name: systemd
Tag: journal.*
Path: ${JOURNAL_PATH}
Systemd_Filter: _SYSTEMD_UNIT=docker.service
Mem_Buf_Limit: 5MB
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- throttle_filter:
header: filter
Name: throttle
Match: "**"
Rate: 1000
Window: 300
Interval: 1s
- libvirt_record_modifier:
header: filter
Name: record_modifier
Match: libvirt
Record: hostname ${HOSTNAME}
- qemu_record_modifier:
header: filter
Name: record_modifier
Match: qemu
Record: hostname ${HOSTNAME}
- kernel_record_modifier:
header: filter
Name: record_modifier
Match: kernel
Record: hostname ${HOSTNAME}
- systemd_modify_fields:
header: filter
Name: modify
Match: journal.**
Rename:
_BOOT_ID: BOOT_ID
_CAP_EFFECTIVE: CAP_EFFECTIVE
_CMDLINE: CMDLINE
_COMM: COMM
_EXE: EXE
_GID: GID
_HOSTNAME: HOSTNAME
_MACHINE_ID: MACHINE_ID
_PID: PID
_SYSTEMD_CGROUP: SYSTEMD_CGROUP
_SYSTEMD_SLICE: SYSTEMD_SLICE
_SYSTEMD_UNIT: SYSTEMD_UNIT
_UID: UID
_TRANSPORT: TRANSPORT
- containers_tail:
header: input
Name: tail
Tag: kube.*
Path: /var/log/containers/*.log
Parser: docker
DB: /var/log/flb_kube.db
Mem_Buf_Limit: 5MB
DB.Sync: Normal
Buffer_Chunk_Size: 1M
Buffer_Max_Size: 1M
- drop_fluentd_logs:
header: output
Name: "null"
Match: "**.fluentd**"
- kube_filter:
header: filter
Name: kubernetes
Match: kube.*
Merge_JSON_Log: On
- fluentd_output:
header: output
Name: forward
Match: "*"
Host: ${FLUENTD_HOST}
Port: ${FLUENTD_PORT}
parsers:
- docker:
header: parser
Name: docker
Format: json
Time_Key: time
Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
Time_Keep: On
- syslog:
header: parser
Name: syslog
Format: regex
Regex: '^(?<time>.*[0-9]{2}:[0-9]{2}:[0-9]{2}) (?<host>[^ ]*) (?<app>[a-zA-Z0-9_\/\.\-]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? (?<log>.+)$'
Time_Key: time
Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
Time_Keep: On
Types: "pid:integer"
fluentd:
- metrics_agent:
header: source
type: monitor_agent
bind: 0.0.0.0
port: 24220
- fluentbit_forward:
header: source
type: forward
port: "#{ENV['FLUENTD_PORT']}"
bind: 0.0.0.0
- drop_fluent_logs:
header: match
type: "null"
expression: "fluent.*"
- add_container_name:
header: filter
type: record_transformer
expression: "kube.**"
enable_ruby: true
record:
-
- header: record
container_name: ${record["kubernetes"]["container_name"]}
- remove_openstack_pod_logged_events:
header: filter
type: grep
expression: "kube.**"
exclude:
-
- header: exclude
key: container_name
pattern: ^(cinder-api|cinder-scheduler|cinder-volume|cinder-backup|glance-api|glance-registry|heat-api|heat-cfn|heat-engine|keystone-api|neutron-dhcp-agent|neutron-l3-agent|neutron-server|nova-osapi|nova-api|nova-compute|nova-conductor|nova-consoleauth|nova-novncproxy|nova-scheduler)$
# NOTE(srwilkers): Look for specific keywords in the log key to determine
# log level of event
- tag_kubernetes_log_level:
header: match
type: rewrite_tag_filter
expression: "kube.var.log.containers.**.log"
rule:
-
- header: rule
key: log
pattern: /info/i
tag: info.${tag}
- header: rule
key: log
pattern: /warn/i
tag: warn.${tag}
- header: rule
key: log
pattern: /error/i
tag: error.${tag}
- header: rule
key: log
pattern: /critical/i
tag: critical.${tag}
- header: rule
key: log
pattern: (.+)
tag: info.${tag}
# NOTE(srwilkers): Create new key for log level, and use the tag prefix
# added previously
- add_kubernetes_log_level_and_application_key:
header: filter
type: record_transformer
enable_ruby: true
expression: "**.kube.var.log.containers.**.log"
record:
-
- header: record
level: ${tag_parts[0]}
application: ${record["kubernetes"]["labels"]["application"]}
- add_openstack_application_key:
header: filter
type: record_transformer
expression: "openstack.**"
record:
-
- header: record
application: ${tag_parts[1]}
#NOTE(srwilkers): This prefixes the tag for oslo.log entries from the
# fluent handler/formatter with the log level, allowing for lookups on
# openstack logs with a particular log level (ie: error.openstack.keystone)
- tag_openstack_log_level:
header: match
type: rewrite_tag_filter
expression: "openstack.**"
rule:
-
- header: rule
key: level
pattern: INFO
tag: info.${tag}
- header: rule
key: level
pattern: WARN
tag: warn.${tag}
- header: rule
key: level
pattern: ERROR
tag: error.${tag}
- header: rule
key: level
pattern: CRITICAL
tag: critical.${tag}
- libvirt_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "libvirt"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: libvirt
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- qemu_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "qemu"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: qemu
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- journal_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "journal.**"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: journal
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- kernel_elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "kernel"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
logstash_prefix: kernel
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
- elasticsearch:
header: match
type: elasticsearch
user: "#{ENV['ELASTICSEARCH_USERNAME']}"
password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
expression: "**"
include_tag_key: true
host: "#{ENV['ELASTICSEARCH_HOST']}"
port: "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format: true
buffer:
-
- header: buffer
flush_thread_count: 8
flush_interval: "15"
chunk_limit_size: 8MB
queue_limit_length: 256
retry_max_interval: 30
retry_forever: false
flush_interval: "15"
fluentd_exporter:
log:
format: "logger:stdout?json=true"
level: "info"
templates:
syslog:
template: "syslog-*"
index_patterns: "syslog-*"
settings:
number_of_shards: 1
mappings:
syslog:
properties:
cluster:
type: keyword
app:
type: keyword
pid:
type: integer
host:
type: keyword
log:
type: text
oslo_openstack_fluentd:
template: "openstack-*"
index_patterns: "openstack-*"
settings:
number_of_shards: 1
mappings:
oslo_openstack_fluentd:
properties:
extra:
properties:
project:
type: text
norms: false
version:
type: text
norms: false
filename:
type: text
norms: false
funcname:
type: text
norms: false
message:
type: text
norms: false
process_name:
type: keyword
index: false
docker_fluentd:
template: "logstash-*"
index_patterns: "logstash-*"
settings:
number_of_shards: 1
mappings:
docker_fluentd:
properties:
kubernetes:
properties:
container_name:
type: keyword
index: false
docker_id:
type: keyword
index: false
host:
type: keyword
index: false
namespace_name:
type: keyword
index: false
pod_id:
type: keyword
index: false
pod_name:
type: keyword
index: false
monitoring:
prometheus:
enabled: true
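
For completeness, the check_templates test referenced earlier amounts to asking Elasticsearch whether each entry in conf.templates was created. A hypothetical manual equivalent for the syslog template defined above, assuming the same ELASTICSEARCH_* environment variables:

curl -s --user "${ELASTICSEARCH_USERNAME}:${ELASTICSEARCH_PASSWORD}" \
  -XGET "${ELASTICSEARCH_ENDPOINT}/_template/syslog?pretty"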