openstack-helm/fluent-logging/values.yaml
Steve Wilkerson ba736d9840 Fluent-logging: Update fluentd configuration
This updates the fluentd configuration, providing a basic mechanism
for determining the log level of events logged under
/var/log/containers. The log level is prepended to the event's tag
and also added to the resulting event as a new `level` key. Together,
these changes allow events of a specific log level to be queried via
the tag.

This also adds similar functionality for events captured via the
oslo.log fluentd handler/formatter. This allows for Elasticsearch
queries akin to `error.openstack.keystone`, which Nagios or another
alerting mechanism can use to raise an alert when an event of a
particular level has been captured.

Change-Id: I016ddcfcf7408de7b6511ddf7009e1e6a5f3a1d9
2018-09-19 14:22:27 -05:00
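To make the mechanism concrete: given the `rewrite_tag_filter` and
`record_transformer` sections and the `include_tag_key: true` setting in the
values below, a Keystone error event would end up indexed with roughly the
following fields. This is an illustrative sketch, not output produced by the
chart, and the `message` value is hypothetical:

```yaml
# Illustrative indexed event (hypothetical values)
tag: error.openstack.keystone   # level prefix added by the tag_openstack_log_level rules
level: ERROR                    # level key emitted by the oslo.log fluentd handler
application: keystone           # added by the add_openstack_application_key filter
message: "..."                  # original log payload (hypothetical)
```

Against the resulting `logstash-*` indices, a filter such as
`tag:error.openstack.keystone` or `level:ERROR AND application:keystone` can
then drive a Nagios check or similar alerting query.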


# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for fluentbit and fluentd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
release_group: null
labels:
  fluentd:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  fluentbit:
    # NOTE(seungkyua): when tolerations is true, nodeSelector will be disabled.
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
    tolerations: false
  prometheus_fluentd_exporter:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
images:
  tags:
    fluentbit: docker.io/fluent/fluent-bit:0.14.2
    fluentd: docker.io/fluent/fluentd-kubernetes-daemonset:v1.2-debian-elasticsearch
    prometheus_fluentd_exporter: docker.io/srwilkers/fluentd_exporter:v0.1
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
    helm_tests: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    elasticsearch_template: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: IfNotPresent
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
secrets:
  elasticsearch:
    user: fluentd-elasticsearch-user
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - fluent-logging-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    elasticsearch_template:
      services:
        - endpoint: internal
          service: elasticsearch
    fluentbit:
      jobs:
        - elasticsearch-template
      services:
        - endpoint: internal
          service: fluentd
    fluentd:
      jobs:
        - elasticsearch-template
      services:
        - endpoint: internal
          service: elasticsearch
    fluentd_with_kafka:
      services:
        - endpoint: internal
          service: elasticsearch
        - endpoint: public
          service: kafka
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
    prometheus_fluentd_exporter:
      services:
        - endpoint: internal
          service: fluentd
    tests:
      services:
        - endpoint: internal
          service: elasticsearch
        - endpoint: internal
          service: fluentd
conf:
  fluentbit:
    - service:
        header: service
        Flush: 30
        Daemon: Off
        Log_Level: info
        Parsers_File: parsers.conf
    - containers_tail:
        header: input
        Name: tail
        Tag: kube.*
        Path: /var/log/containers/*.log
        Parser: docker
        DB: /var/log/flb_kube.db
        Mem_Buf_Limit: 5MB
        DB.Sync: Normal
        Buffer_Chunk_Size: 1M
        Buffer_Max_Size: 1M
    - drop_fluentd_logs:
        header: output
        Name: "null"
        Match: "**.fluentd**"
    - kube_filter:
        header: filter
        Name: kubernetes
        Match: kube.*
        Merge_JSON_Log: On
    - fluentd_output:
        header: output
        Name: forward
        Match: "*"
        Host: ${FLUENTD_HOST}
        Port: ${FLUENTD_PORT}
  parsers:
    - docker:
        header: parser
        Name: docker
        Format: json
        Time_Key: time
        Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
        Time_Keep: On
  td_agent:
    - metrics_agent:
        header: source
        type: monitor_agent
        bind: 0.0.0.0
        port: 24220
    - fluentbit_forward:
        header: source
        type: forward
        port: "#{ENV['FLUENTD_PORT']}"
        bind: 0.0.0.0
    - filter_fluentd_logs:
        header: match
        expression: "fluent.**"
        type: "null"
    # NOTE(srwilkers): Look for specific keywords in the log key to determine
    # the log level of the event
    - tag_kubernetes_log_level:
        header: match
        type: rewrite_tag_filter
        expression: "kube.var.log.containers.**.log"
        rule:
          -
            - header: rule
              key: log
              pattern: /info/i
              tag: info.${tag}
            - header: rule
              key: log
              pattern: /warn/i
              tag: warn.${tag}
            - header: rule
              key: log
              pattern: /error/i
              tag: error.${tag}
            - header: rule
              key: log
              pattern: /critical/i
              tag: critical.${tag}
            - header: rule
              key: log
              pattern: (.+)
              tag: info.${tag}
    # NOTE(srwilkers): Create a new key for the log level, using the tag prefix
    # added previously
    - add_kubernetes_log_level_and_application_key:
        header: filter
        type: record_transformer
        enable_ruby: true
        expression: "**.kube.var.log.containers.**.log"
        record:
          -
            - header: record
              level: ${tag_parts[0]}
              application: ${record["kubernetes"]["labels"]["application"]}
    - add_openstack_application_key:
        header: filter
        type: record_transformer
        expression: "openstack.**"
        record:
          -
            - header: record
              application: ${tag_parts[1]}
    # NOTE(srwilkers): This prefixes the tag for oslo.log entries from the
    # fluent handler/formatter with the log level, allowing for lookups on
    # openstack logs with a particular log level (e.g. error.openstack.keystone)
    - tag_openstack_log_level:
        header: match
        type: rewrite_tag_filter
        expression: "openstack.**"
        rule:
          -
            - header: rule
              key: level
              pattern: INFO
              tag: info.${tag}
            - header: rule
              key: level
              pattern: WARN
              tag: warn.${tag}
            - header: rule
              key: level
              pattern: ERROR
              tag: error.${tag}
            - header: rule
              key: level
              pattern: CRITICAL
              tag: critical.${tag}
    - elasticsearch:
        header: match
        type: elasticsearch
        user: "#{ENV['ELASTICSEARCH_USERNAME']}"
        password: "#{ENV['ELASTICSEARCH_PASSWORD']}"
        expression: "**"
        include_tag_key: true
        host: "#{ENV['ELASTICSEARCH_HOST']}"
        port: "#{ENV['ELASTICSEARCH_PORT']}"
        logstash_format: true
        buffer_chunk_limit: 10M
        buffer_queue_limit: 32
        flush_interval: 20s
        max_retry_wait: 300
        disable_retry_limit: ""
        num_threads: 8
        type_name: fluent
  fluentd_exporter:
    log:
      format: "logger:stdout?json=true"
      level: "info"
  templates:
    fluent:
      template: "logstash-*"
      index_patterns: "logstash-*"
      settings:
        number_of_shards: 1
      mappings:
        fluent:
          properties:
            kubernetes:
              properties:
                container_name:
                  type: keyword
                  index: false
                docker_id:
                  type: keyword
                  index: false
                host:
                  type: keyword
                  index: false
                namespace_name:
                  type: keyword
                  index: false
                pod_id:
                  type: keyword
                  index: false
                pod_name:
                  type: keyword
                  index: false
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  elasticsearch:
    namespace: null
    name: elasticsearch
    auth:
      admin:
        username: admin
        password: changeme
    hosts:
      data: elasticsearch-data
      default: elasticsearch-logging
      discovery: elasticsearch-discovery
      public: elasticsearch
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      http:
        default: 80
  kafka:
    namespace: null
    name: kafka
    hosts:
      default: kafka-logging
      public: kafka
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      service:
        default: 9092
  fluentd:
    namespace: null
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      service:
        default: 24224
      metrics:
        default: 24220
  prometheus_fluentd_exporter:
    namespace: null
    hosts:
      default: fluentd-exporter
    host_fqdn_override:
      default: null
    path:
      default: /metrics
    scheme:
      default: 'http'
    port:
      metrics:
        default: 9309
monitoring:
  prometheus:
    enabled: false
    fluentd_exporter:
      scrape: true
network:
  fluentd:
    node_port:
      enabled: false
      port: 32329
pod:
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  lifecycle:
    upgrades:
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        fluentbit:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    termination_grace_period:
      fluentd:
        timeout: 30
      prometheus_fluentd_exporter:
        timeout: 30
  replicas:
    fluentd: 3
    prometheus_fluentd_exporter: 1
  resources:
    enabled: false
    fluentbit:
      limits:
        memory: '400Mi'
        cpu: '400m'
      requests:
        memory: '100Mi'
        cpu: '100m'
    fluentd:
      limits:
        memory: '1024Mi'
        cpu: '2000m'
      requests:
        memory: '128Mi'
        cpu: '500m'
    prometheus_fluentd_exporter:
      limits:
        memory: "1024Mi"
        cpu: "2000m"
      requests:
        memory: "128Mi"
        cpu: "500m"
    jobs:
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: '128Mi'
          cpu: '100m'
        limits:
          memory: '1024Mi'
          cpu: '2000m'
  mounts:
    fluentd:
      fluentd:
    fluentbit:
      fluentbit:
    fluent_tests:
      fluent_tests:
    elasticsearch_template:
      init_container:
      elasticsearch_template:
manifests:
  configmap_bin: true
  configmap_etc: true
  deployment_fluentd: true
  daemonset_fluentbit: true
  job_image_repo_sync: true
  helm_tests: true
  monitoring:
    prometheus:
      configmap_bin: true
      deployment_exporter: true
      service_exporter: true
  secret_elasticsearch: true
  service_fluentd: true
  job_elasticsearch_template: true
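As with any Helm values file, the defaults above are intended to be overridden
per deployment rather than edited in place; key paths in an overrides file
mirror the structure of this one. A minimal, hypothetical overrides file
(passed to Helm with `--values`) might look like:

```yaml
# Hypothetical overrides for this chart; key paths mirror the defaults above.
monitoring:
  prometheus:
    enabled: true        # enable Prometheus scraping of the fluentd exporter
network:
  fluentd:
    node_port:
      enabled: true      # expose fluentd via the NodePort defined above (32329)
pod:
  replicas:
    fluentd: 5
```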