update packetbeat config for the latest release

Change-Id: If370e015ec2ec33b6f6e744958d7bcbed041ab42
Signed-off-by: Kevin Carter <kevin@cloudnull.com>
Author: Kevin Carter
Date: 2019-02-26 22:29:53 -06:00
Parent: 2d3c0d55f4
Commit: c74eed3845
2 changed files with 184 additions and 15 deletions


@@ -128,6 +128,10 @@ packetbeat.protocols:
# This option indicates which operators will be ignored.
#ignored_ops: ["SUPPORTED","OPTIONS"]
- type: dhcpv4
# The ports on which to listen for DHCPv4 traffic.
ports: [67, 68]
- type: dns
# Enable DNS monitoring. Default: true
enabled: true
@@ -515,7 +519,8 @@ packetbeat.protocols:
# Hints the minimum number of events stored in the queue,
# before providing a batch of events to the outputs.
# A value of 0 (the default) ensures events are immediately available
# The default value is set to 2048.
# A value of 0 ensures events are immediately available
# to be sent to the outputs.
#flush.min_events: 2048
@@ -523,6 +528,66 @@ packetbeat.protocols:
# if the number of events stored in the queue is < flush.min_events.
#flush.timeout: 1s
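# Illustrative sketch (not from the upstream file): combining the memory
# queue settings above, a latency-sensitive deployment could make events
# available to the outputs immediately while keeping a larger in-memory
# buffer. The values here are assumptions, not recommendations.
#queue.mem:
#  events: 8192
#  flush.min_events: 0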
# The spool queue stores events in a local spool file before forwarding
# them to the outputs.
#
# Beta: spooling to disk is currently a beta feature. Use with care.
#
# The spool file is a circular buffer, which blocks once the file/buffer is full.
# Events are put into a write buffer and flushed once the write buffer
# is full or the flush.timeout is triggered.
# Once ACKed by the output, events are removed immediately from the queue,
# making space for new events to be persisted.
#spool:
# The file namespace configures the file path and the file creation settings.
# Once the file exists, the `size`, `page_size` and `prealloc` settings
# will have no further effect.
#file:
# Location of the spool file. The default value is ${path.data}/spool.dat.
#path: "${path.data}/spool.dat"
# Configure the file permissions used if the file is created. The default value is 0600.
#permissions: 0600
# File size hint. The spool blocks once this limit is reached. The default value is 100 MiB.
#size: 100MiB
# The file's page size. A file is split into multiple pages of the same size. The default value is 4KiB.
#page_size: 4KiB
# If prealloc is set, the required space for the file is reserved using
# truncate. The default value is true.
#prealloc: true
# Spool writer settings
# Events are serialized into a write buffer. The write buffer is flushed if:
# - The buffer limit has been reached.
# - The configured limit of buffered events is reached.
# - The flush timeout is triggered.
#write:
# Sets the write buffer size.
#buffer_size: 1MiB
# Maximum duration after which events are flushed if the write buffer
# is not yet full. The default value is 1s.
#flush.timeout: 1s
# Maximum number of buffered events. The write buffer is flushed once the
# limit is reached.
#flush.events: 16384
# Configure the on-disk event encoding. The encoding can be changed
# between restarts.
# Valid encodings are: json, ubjson, and cbor.
#codec: cbor
#read:
# Reader flush timeout: how long to wait for more events to become
# available so as to fill a complete batch, as required by the outputs.
# If flush.timeout is 0, all available events are forwarded to the
# outputs immediately.
# The default value is 0s.
#flush.timeout: 0s
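# Putting the spool settings above together, a minimal end-to-end sketch,
# mirroring the example in the Beats documentation; the values are
# illustrative assumptions, not tuned recommendations:
#queue.spool:
#  file:
#    path: "${path.data}/spool.dat"
#    size: 512MiB
#    page_size: 16KiB
#  write:
#    buffer_size: 10MiB
#    flush.timeout: 5s
#    flush.events: 1024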
# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:
@@ -536,8 +601,8 @@ packetbeat.protocols:
#
# event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
# The supported processors are drop_fields, drop_event, include_fields,
# decode_json_fields, and add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
@@ -557,6 +622,22 @@ packetbeat.protocols:
# equals:
# http.code: 200
#
# The following example renames the field a to b:
#
#processors:
#- rename:
# fields:
# - from: "a"
# to: "b"
#
# The following example tokenizes the string into fields:
#
#processors:
#- dissect:
# tokenizer: "%{key1} - %{key2}"
# field: "message"
# target_prefix: "dissect"
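# As an illustration (hypothetical message), the tokenizer above would turn
# a message of "GET - /index" into dissect.key1: "GET" and
# dissect.key2: "/index".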
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
@@ -581,7 +662,9 @@ packetbeat.protocols:
# match_pids: ["process.pid", "process.ppid"]
# match_source: true
# match_source_index: 4
# match_short_id: false
# cleanup_timeout: 60
# labels.dedot: false
# # To connect to Docker over TLS you must specify a client and CA certificate.
# #ssl:
# # certificate_authority: "/etc/pki/root/ca.pem"
@@ -594,6 +677,31 @@ packetbeat.protocols:
#
#processors:
#- add_docker_metadata: ~
#
# The following example enriches each event with host metadata.
#
#processors:
#- add_host_metadata:
# netinfo.enabled: false
#
# The following example enriches each event with process metadata using
# process IDs included in the event.
#
#processors:
#- add_process_metadata:
# match_pids: ["system.process.ppid"]
# target: system.process.parent
#
# The following example decodes fields containing JSON strings
# and replaces the strings with valid JSON objects.
#
#processors:
#- decode_json_fields:
# fields: ["field1", "field2", ...]
# process_array: false
# max_depth: 1
# target: ""
# overwrite_keys: false
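# As an illustration (hypothetical event), a field1 holding the string
# '{"status": 200}' would be replaced with the parsed object, so that
# field1.status becomes addressable by later processors and outputs.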
processors:
- add_host_metadata: ~
@@ -746,9 +854,16 @@ processors:
#username: ''
#password: ''
# Kafka version packetbeat is assumed to run against. Defaults to the oldest
# supported stable version (currently version 0.8.2.0)
#version: 0.8.2
# Kafka version packetbeat is assumed to run against. Defaults to "1.0.0".
#version: '1.0.0'
# Configure JSON encoding
#codec.json:
# Pretty-print JSON events
#pretty: false
# Configure escaping of HTML symbols in strings.
#escape_html: true
# Metadata update configuration. The metadata contain the leader information
# used to decide which broker to publish to.
@@ -796,6 +911,10 @@ processors:
# default is gzip.
#compression: gzip
# Set the compression level. Currently only gzip provides a compression level
# between 0 and 9. The default value is chosen by the compression algorithm.
#compression_level: 4
# The maximum permitted size of JSON-encoded messages. Bigger messages will be
# dropped. The default value is 1000000 (bytes). This value should be equal to
# or less than the broker's message.max.bytes.
@@ -852,6 +971,14 @@ processors:
# Boolean flag to enable or disable the output module.
#enabled: true
# Configure JSON encoding
#codec.json:
# Pretty-print JSON events
#pretty: false
# Configure escaping of HTML symbols in strings.
#escape_html: true
# The list of Redis servers to connect to. If load balancing is enabled, the
# events are distributed to the servers in the list. If one server becomes
# unreachable, the events are distributed to the reachable servers only.
@@ -899,6 +1026,17 @@ processors:
# until all events are published. The default is 3.
#max_retries: 3
# The number of seconds to wait before trying to reconnect to Redis
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Redis after a network error. The default is 60s.
#backoff.max: 60s
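# Worked example with the defaults above (assuming the timer doubles on each
# failed attempt): reconnects wait 1s, 2s, 4s, 8s, 16s, 32s, and then 60s
# for every further attempt until a connection succeeds and the timer resets.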
# The maximum number of events to bulk in a single Redis request or pipeline.
# The default is 2048.
#bulk_max_size: 2048
@@ -953,6 +1091,14 @@ processors:
# Boolean flag to enable or disable the output module.
#enabled: true
# Configure JSON encoding
#codec.json:
# Pretty-print JSON events
#pretty: false
# Configure escaping of HTML symbols in strings.
#escape_html: true
# Path to the directory in which to save the generated files. This option
# is mandatory.
#path: "/tmp/packetbeat"
@@ -980,8 +1126,13 @@ processors:
# Boolean flag to enable or disable the output module.
#enabled: true
# Pretty-print JSON events
#pretty: false
# Configure JSON encoding
#codec.json:
# Pretty-print JSON events
#pretty: false
# Configure escaping of HTML symbols in strings.
#escape_html: true
#================================= Paths ======================================
@@ -1009,6 +1160,10 @@ processors:
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
#================================ Keystore ==========================================
# Location of the Keystore containing the keys and their sensitive values.
#keystore.path: "${path.config}/beats.keystore"
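# For example (assuming the standard Beats keystore CLI), a secret can be
# stored once and then referenced from this file:
#   packetbeat keystore create
#   packetbeat keystore add ES_PASSWORD
# The value is then available as ${ES_PASSWORD} wherever a sensitive
# setting is expected.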
#============================== Dashboards =====================================
{{ elk_macros.setup_dashboards('packetbeat') }}
@@ -1026,7 +1181,7 @@ processors:
#============================== Xpack Monitoring ===============================
{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }}
#================================ HTTP Endpoint ================================
#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
@@ -1040,3 +1195,8 @@ processors:
# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
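# Illustrative query once the endpoint is enabled (append ?pretty for
# formatted JSON):
#   curl http://localhost:5066/stats?pretty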
#============================= Process Security ================================
# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true


@@ -333,12 +333,11 @@ setup.kibana:
{%- endmacro %}
{% macro beat_logging(beat_name) -%}
# There are three options for the log output: syslog, file, stderr.
# On Windows systems, the log files are sent to the file output by default;
# on all other systems, to syslog by default.
# There are four options for the log output: file, stderr, syslog, and eventlog.
# The file output is the default.
# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
# Available log levels are: error, warning, info, debug
#logging.level: info
# Enable debug output for selected components. To enable all selectors use ["*"]
@@ -347,9 +346,12 @@ setup.kibana:
#logging.selectors: [ ]
# Send all logging output to syslog. The default is false.
#logging.to_syslog: true
#logging.to_syslog: false
# If enabled, apm-server periodically logs its internal metrics that have changed
# Send all logging output to Windows Event Logs. The default is false.
#logging.to_eventlog: false
# If enabled, packetbeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
@@ -380,6 +382,13 @@ logging.files:
# Must be a valid Unix-style file permissions mask expressed in octal notation.
#permissions: 0600
# Enable log file rotation on time intervals in addition to size-based rotation.
# Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
# are boundary-aligned with minutes, hours, days, weeks, months, and years as
# reported by the local system clock. All other intervals are calculated from the
# Unix epoch. Defaults to disabled.
#interval: 0
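# For example (illustrative), an interval of 24h would additionally rotate
# the log file at day boundaries of the local system clock.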
# Set to true to log messages in JSON format.
#logging.json: false
{%- endmacro %}