{% import 'templates/_macros.j2' as elk_macros %} ###################### Packetbeat Configuration Example ####################### # This file is a full configuration example documenting all non-deprecated # options in comments. For a shorter configuration example, that contains only # the most common options, please see packetbeat.yml in the same directory. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/packetbeat/index.html #============================== Network device ================================ # Select the network interface to sniff the data. You can use the "any" # keyword to sniff on all connected interfaces. packetbeat.interfaces.device: any # Packetbeat supports three sniffer types: # * pcap, which uses the libpcap library and works on most platforms, but it's # not the fastest option. # * af_packet, which uses memory-mapped sniffing. This option is faster than # libpcap and doesn't require a kernel module, but it's Linux-specific. packetbeat.interfaces.type: af_packet # The maximum size of the packets to capture. The default is 65535, which is # large enough for almost all networks and interface types. If you sniff on a # physical network interface, the optimal setting is the MTU size. On virtual # interfaces, however, it's safer to accept the default value. #packetbeat.interfaces.snaplen: 65535 # The maximum size of the shared memory buffer to use between the kernel and # user space. A bigger buffer usually results in lower CPU usage, but consumes # more memory. This setting is only available for the af_packet sniffer type. # The default is 30 MB. #packetbeat.interfaces.buffer_size_mb: 30 # Packetbeat automatically generates a BPF for capturing only the traffic on # ports where it expects to find known protocols. Use this setting to tell # Packetbeat to generate a BPF filter that accepts VLAN tags.
packetbeat.interfaces.with_vlans: true # Use this setting to override the automatically generated BPF filter. #packetbeat.interfaces.bpf_filter: #================================== Flows ===================================== packetbeat.flows: # Enable Network flows. Default: true enabled: true # Set network flow timeout. Flow is killed if no packet is received before being # timed out. timeout: 90s # Configure reporting period. If set to -1, only killed flows will be reported period: 30s #========================== Transaction protocols ============================= packetbeat.protocols: - type: icmp # Enable ICMPv4 and ICMPv6 monitoring. Default: true enabled: true - type: amqp # Enable AMQP monitoring. Default: true enabled: true # Configure the ports where to listen for AMQP traffic. You can disable # the AMQP protocol by commenting out the list of ports. ports: [5672] # Truncate messages that are published and avoid huge messages being # indexed. # Default: 1000 #max_body_length: 1000 # Hide the header fields in header frames. # Default: false parse_headers: true # Hide the additional arguments of method frames. # Default: false parse_arguments: true # Hide all methods relative to connection negotiation between server and # client. # Default: true hide_connection_information: false # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: cassandra #Cassandra port for traffic monitoring. ports: [9042] # If this option is enabled, the raw message of the request (`cassandra_request` field) # is included in published events. The default is true. 
#send_request: true # If this option is enabled, the raw message of the response (`cassandra_request.request_headers` field) # is included in published events. The default is true. Enable `send_request` first before enabling this option. #send_request_header: true # If this option is enabled, the raw message of the response (`cassandra_response` field) # is included in published events. The default is true. #send_response: true # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field) # is included in published events. The default is true. Enable `send_response` first before enabling this option. #send_response_header: true # Configures the default compression algorithm being used to uncompress compressed frames by name. Currently only `snappy` can be configured. # By default no compressor is configured. #compressor: "snappy" # This option indicates which Operator/Operators will be ignored. #ignored_ops: ["SUPPORTED","OPTIONS"] - type: dhcpv4 # Configure the DHCP for IPv4 ports. ports: [67, 68] - type: dns # Enable DNS monitoring. Default: true #enabled: true # Configure the ports where to listen for DNS traffic. You can disable # the DNS protocol by commenting out the list of ports. ports: [53] # include_authorities controls whether or not the dns.authorities field # (authority resource records) is added to messages. # Default: false include_authorities: true # include_additionals controls whether or not the dns.additionals field # (additional resource records) is added to messages. # Default: false include_additionals: true # send_request and send_response control whether or not the stringified DNS # request and response message are added to the result. # Nearly all data about the request/response is available in the dns.* # fields, but this can be useful if you need visibility specifically # into the request or the response. # Default: false # send_request: true # send_response: true # Transaction timeout.
Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: http # Enable HTTP monitoring. Default: true #enabled: true {% set used_ports = [53, 443, 2049, 3306, 5432, 5672, 6379, 9042, 9090, 11211, 27017] %} {% set ports = [] %} {% for item in heartbeat_services %} {% for port in item.ports %} {% if (item.type == 'http') and (not port in used_ports) and (not port in packetbeat_ignored_ports) %} {% set _ = ports.extend([port]) %} {% endif %} {% endfor %} {% endfor %} # Configure the ports where to listen for HTTP traffic. You can disable # the HTTP protocol by commenting out the list of ports. ports: {{ ports | unique }} # Uncomment the following to hide certain parameters in URL or forms attached # to HTTP requests. The names of the parameters are case insensitive. # The value of the parameters will be replaced with the 'xxxxx' string. # This is generally useful for avoiding storing user passwords or other # sensitive information. # Only query parameters and top level form parameters are replaced. # hide_keywords: ['pass', 'password', 'passwd'] # A list of header names to capture and send to Elasticsearch. These headers # are placed under the `headers` dictionary in the resulting JSON. send_headers: true # Instead of sending a white list of headers to Elasticsearch, you can send # all headers by setting this option to true. The default is false. send_all_headers: true # The list of content types for which Packetbeat includes the full HTTP # payload. If the request's or response's Content-Type matches any on this # list, the full body will be included under the request or response field. #include_body_for: [] # The list of content types for which Packetbeat includes the full HTTP # request payload. #include_request_body_for: [] # The list of content types for which Packetbeat includes the full HTTP # response payload. 
#include_response_body_for: [] # Whether the body of a request must be decoded when a content-encoding # or transfer-encoding has been applied. #decode_body: true # If the Cookie or Set-Cookie headers are sent, this option controls whether # they are split into individual values. #split_cookie: false # The header field to extract the real IP from. This setting is useful when # you want to capture traffic behind a reverse proxy, but you want to get the # geo-location information. #real_ip_header: # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s # Maximum message size. If an HTTP message is larger than this, it will # be trimmed to this size. Default is 10 MB. #max_message_size: 10485760 - type: memcache # Enable memcache monitoring. Default: true #enabled: true # Configure the ports where to listen for memcache traffic. You can disable # the Memcache protocol by commenting out the list of ports. ports: [11211] # Uncomment the parseunknown option to force the memcache text protocol parser # to accept unknown commands. # Note: All unknown commands MUST not contain any data parts! # Default: false # parseunknown: true # Update the maxvalue option to store the values - base64 encoded - in the # json output. # possible values: # maxvalue: -1 # store all values (text based protocol multi-get) # maxvalue: 0 # store no values at all # maxvalue: N # store up to N values # Default: 0 # maxvalues: -1 # Use maxbytespervalue to limit the number of bytes to be copied per value element. 
# Note: Values will be base64 encoded, so actual size in json document # will be 4 times maxbytespervalue. # Default: unlimited # maxbytespervalue: 100 # UDP transaction timeout in milliseconds. # Note: Quiet messages in UDP binary protocol will get response only in error case. # The memcached analyzer will wait for udptransactiontimeout milliseconds # before publishing quiet messages. Non quiet messages or quiet requests with # error response will not have to wait for the timeout. # Default: 200 # udptransactiontimeout: 1000 # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: mysql # Enable mysql monitoring. Default: true #enabled: true # Configure the ports where to listen for MySQL traffic. You can disable # the MySQL protocol by commenting out the list of ports. ports: [3306,3307] # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: pgsql # Enable pgsql monitoring. Default: true enabled: false # Configure the ports where to listen for Pgsql traffic. You can disable # the Pgsql protocol by commenting out the list of ports. 
ports: [5432] # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: redis # Enable redis monitoring. Default: true enabled: false # Configure the ports where to listen for Redis traffic. You can disable # the Redis protocol by commenting out the list of ports. ports: [6379] # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: thrift # Enable thrift monitoring. Default: true enabled: false # Configure the ports where to listen for Thrift-RPC traffic. You can disable # the Thrift-RPC protocol by commenting out the list of ports. ports: [9090] # The Thrift transport type. Currently this option accepts the values socket # for TSocket, which is the default Thrift transport, and framed for the # TFramed Thrift transport. The default is socket. #transport_type: socket # The Thrift protocol type. Currently the only accepted value is binary for # the TBinary protocol, which is the default Thrift protocol. #protocol_type: binary # The Thrift interface description language (IDL) files for the service that # Packetbeat is monitoring. Providing the IDL enables Packetbeat to include # parameter and exception names. 
#idl_files: [] # The maximum length for strings in parameters or return values. If a string # is longer than this value, the string is automatically truncated to this # length. #string_max_size: 200 # The maximum number of elements in a Thrift list, set, map, or structure. #collection_max_size: 15 # If this option is set to false, Packetbeat decodes the method name from the # reply and simply skips the rest of the response message. #capture_reply: true # If this option is set to true, Packetbeat replaces all strings found in # method parameters, return codes, or exception structures with the "*" # string. #obfuscate_strings: false # The maximum number of fields that a structure can have before Packetbeat # ignores the whole transaction. #drop_after_n_struct_fields: 500 # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: mongodb # Enable mongodb monitoring. Default: true enabled: false # Configure the ports where to listen for MongoDB traffic. You can disable # the MongoDB protocol by commenting out the list of ports. ports: [27017] # The maximum number of documents from the response to index in the `response` # field. The default is 10. #max_docs: 10 # The maximum number of characters in a single document indexed in the # `response` field. The default is 5000. You can set this to 0 to index an # unlimited number of characters per document. #max_doc_length: 5000 # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. 
#send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: nfs # Enable NFS monitoring. Default: true enabled: true # Configure the ports where to listen for NFS traffic. You can disable # the NFS protocol by commenting out the list of ports. ports: [2049] # If this option is enabled, the raw message of the request (`request` field) # is sent to Elasticsearch. The default is false. #send_request: false # If this option is enabled, the raw message of the response (`response` # field) is sent to Elasticsearch. The default is false. #send_response: false # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s - type: tls # Enable TLS monitoring. Default: true #enabled: true # Configure the ports where to listen for TLS traffic. You can disable # the TLS protocol by commenting out the list of ports. ports: - 443 # HTTPS - 993 # IMAPS - 995 # POP3S - 5223 # XMPP over SSL - 8443 - 8883 # Secure MQTT - 9243 # Elasticsearch # List of hash algorithms to use to calculate certificates' fingerprints. # Valid values are `sha1`, `sha256` and `md5`. #fingerprints: [sha1] # If this option is enabled, the client and server certificates and # certificate chains are sent to Elasticsearch. The default is true. #send_certificates: true # If this option is enabled, the raw certificates will be stored # in PEM format under the `raw` key. The default is false. 
#include_raw_certificates: false #=========================== Monitored processes ============================== # Packetbeat can enrich events with information about the process associated with # the socket that sent or received the packet if Packetbeat is monitoring # traffic from the host machine. By default process enrichment is disabled. # This feature works on Linux and Windows. packetbeat.procs.enabled: false # If you want to ignore transactions created by the server on which the shipper # is installed you can enable this option. This option is useful to remove # duplicates if shippers are installed on multiple servers. Default value is # false. packetbeat.ignore_outgoing: false #================================ General ====================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. # If this option is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the # output. Fields can be scalar values, arrays, dictionaries, or any nested # combination of these. #fields: # env: staging # If this option is set to true, the custom fields are stored as top-level # fields in the output document instead of being grouped under a fields # sub-dictionary. Default is false. #fields_under_root: false # Internal queue configuration for buffering events to be published. #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer.
#events: 4096 # Hints the minimum number of events stored in the queue, # before providing a batch of events to the outputs. # The default value is set to 2048. # A value of 0 ensures events are immediately available # to be sent to the outputs. #flush.min_events: 2048 # Maximum duration after which events are available to the outputs, # if the number of events stored in the queue is < min_flush_events. #flush.timeout: 1s # The spool queue will store events in a local spool file, before # forwarding the events to the outputs. # # Beta: spooling to disk is currently a beta feature. Use with care. # # The spool file is a circular buffer, which blocks once the file/buffer is full. # Events are put into a write buffer and flushed once the write buffer # is full or the flush_timeout is triggered. # Once ACKed by the output, events are removed immediately from the queue, # making space for new events to be persisted. #spool: # The file namespace configures the file path and the file creation settings. # Once the file exists, the `size`, `page_size` and `prealloc` settings # will have no more effect. #file: # Location of spool file. The default value is ${path.data}/spool.dat. #path: "${path.data}/spool.dat" # Configure file permissions if file is created. The default value is 0600. #permissions: 0600 # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. #size: 100MiB # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. #page_size: 4KiB # If prealloc is set, the required space for the file is reserved using # truncate. The default value is true. #prealloc: true # Spool writer settings # Events are serialized into a write buffer. The write buffer is flushed if: # - The buffer limit has been reached. # - The configured limit of buffered events is reached. # - The flush timeout is triggered. #write: # Sets the write buffer size. 
#buffer_size: 1MiB # Maximum duration after which events are flushed if the write buffer # is not full yet. The default value is 1s. #flush.timeout: 1s # Number of maximum buffered events. The write buffer is flushed once the # limit is reached. #flush.events: 16384 # Configure the on-disk event encoding. The encoding can be changed # between restarts. # Valid encodings are: json, ubjson, and cbor. #codec: cbor #read: # Reader flush timeout, waiting for more events to become available, so # to fill a complete batch as required by the outputs. # If flush_timeout is 0, all available events are forwarded to the # outputs immediately. # The default value is 0s. #flush.timeout: 0s # Sets the maximum number of CPUs that can be executing simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: #================================ Processors =================================== {{ elk_macros.beat_processors(processors) }} # Processors are used to reduce the number of fields in the exported event or to # enhance the event with external metadata. This section defines a list of # processors that are applied one by one and the first one receives the initial # event: # # event -> filter1 -> event1 -> filter2 ->event2 ... # # The supported processors are drop_fields, drop_event, include_fields, # decode_json_fields, and add_cloud_metadata. 
# # For example, you can use the following processors to keep the fields that # contain CPU load percentages, but remove the fields that contain CPU ticks # values: # #processors: #- include_fields: # fields: ["cpu"] #- drop_fields: # fields: ["cpu.user", "cpu.system"] # # The following example drops the events that have the HTTP response code 200: # #processors: #- drop_event: # when: # equals: # http.code: 200 # # The following example renames the field a to b: # #processors: #- rename: # fields: # - from: "a" # to: "b" # # The following example tokenizes the string into fields: # #processors: #- dissect: # tokenizer: "%{key1} - %{key2}" # field: "message" # target_prefix: "dissect" # # The following example enriches each event with metadata from the cloud # provider about the host machine. It works on EC2, GCE, DigitalOcean, # Tencent Cloud, and Alibaba Cloud. # #processors: #- add_cloud_metadata: ~ # # The following example enriches each event with the machine's local time zone # offset from UTC. # #processors: #- add_locale: # format: offset # # The following example enriches each event with docker metadata, it matches # given fields to an existing container id and adds info from that container: # #processors: #- add_docker_metadata: # host: "unix:///var/run/docker.sock" # match_fields: ["system.process.cgroup.id"] # match_pids: ["process.pid", "process.ppid"] # match_source: true # match_source_index: 4 # match_short_id: false # cleanup_timeout: 60 # labels.dedot: false # # To connect to Docker over TLS you must specify a client and CA certificate. # #ssl: # # certificate_authority: "/etc/pki/root/ca.pem" # # certificate: "/etc/pki/client/cert.pem" # # key: "/etc/pki/client/cert.key" # # The following example enriches each event with docker metadata, it matches # container id from log path available in `source` field (by default it expects # it to be /var/lib/docker/containers/*/*.log). 
# #processors: #- add_docker_metadata: ~ # # The following example enriches each event with host metadata. # #processors: #- add_host_metadata: # netinfo.enabled: false # # The following example enriches each event with process metadata using # process IDs included in the event. # #processors: #- add_process_metadata: # match_pids: ["system.process.ppid"] # target: system.process.parent # # The following example decodes fields containing JSON strings # and replaces the strings with valid JSON objects. # #processors: #- decode_json_fields: # fields: ["field1", "field2", ...] # process_array: false # max_depth: 1 # target: "" # overwrite_keys: false #============================= Elastic Cloud ================================== # These settings simplify using packetbeat with the Elastic Cloud (https://cloud.elastic.co/). # The cloud.id setting overwrites the `output.elasticsearch.hosts` and # `setup.kibana.host` options. # You can find the `cloud.id` in the Elastic Cloud web UI. #cloud.id: # The cloud.auth setting overwrites the `output.elasticsearch.username` and # `output.elasticsearch.password` settings. The format is `<user>:<pass>`. #cloud.auth: #================================ Outputs ====================================== # Configure what output to use when sending the data collected by the beat. #-------------------------- Elasticsearch output ------------------------------- #output.elasticsearch: # # Boolean flag to enable or disable the output module. # #enabled: true # # # Array of hosts to connect to. # # Scheme and port can be left out and will be set to the default (http and 9200) # # In case you specify an additional path, the scheme is required: http://localhost:9200/path # # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 # hosts: ["localhost:9200"] # # # Set gzip compression level. # #compression_level: 0 # # # Configure escaping HTML symbols in strings. # #escape_html: false # # # Optional protocol and basic auth credentials.
# #protocol: "https" # #username: "elastic" # #password: "changeme" # # # Dictionary of HTTP parameters to pass within the URL with index operations. # #parameters: # #param1: value1 # #param2: value2 # # # Number of workers per Elasticsearch host. # #worker: 1 # # # Optional index name. The default is "packetbeat" plus date # # and generates [packetbeat-]YYYY.MM.DD keys. # # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. # #index: "packetbeat-%{[agent.version]}-%{+yyyy.MM.dd}" # # # Optional ingest node pipeline. By default no pipeline will be used. # #pipeline: "" # # # Optional HTTP path # #path: "/elasticsearch" # # # Custom HTTP headers to add to each request # #headers: # # X-My-Header: Contents of the header # # # Proxy server URL # #proxy_url: http://proxy:3128 # # # The number of times a particular Elasticsearch index operation is attempted. If # # the indexing operation doesn't succeed after this many retries, the events are # # dropped. The default is 3. # #max_retries: 3 # # # The maximum number of events to bulk in a single Elasticsearch bulk API index request. # # The default is 50. # #bulk_max_size: 50 # # # The number of seconds to wait before trying to reconnect to Elasticsearch # # after a network error. After waiting backoff.init seconds, the Beat # # tries to reconnect. If the attempt fails, the backoff timer is increased # # exponentially up to backoff.max. After a successful connection, the backoff # # timer is reset. The default is 1s. # #backoff.init: 1s # # # The maximum number of seconds to wait before attempting to connect to # # Elasticsearch after a network error. The default is 60s. # #backoff.max: 60s # # # Configure HTTP request timeout before failing a request to Elasticsearch. # #timeout: 90 # # # Use SSL settings for HTTPS. # #ssl.enabled: true # # # Configure SSL verification mode. If `none` is configured, all server hosts # # and certificates will be accepted. 
In this mode, SSL-based connections are # # susceptible to man-in-the-middle attacks. Use only for testing. Default is # # `full`. # #ssl.verification_mode: full # # # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to # # 1.2 are enabled. # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] # # # List of root certificates for HTTPS server verifications # #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # # # Certificate for SSL client authentication # #ssl.certificate: "/etc/pki/client/cert.pem" # # # Client certificate key # #ssl.key: "/etc/pki/client/cert.key" # # # Optional passphrase for decrypting the certificate key. # #ssl.key_passphrase: '' # # # Configure cipher suites to be used for SSL connections # #ssl.cipher_suites: [] # # # Configure curve types for ECDHE-based cipher suites # #ssl.curve_types: [] # # # Configure what types of renegotiation are supported. Valid options are # # never, once, and freely. Default is never. # #ssl.renegotiation: never # #----------------------------- Logstash output --------------------------------- {{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} #------------------------------- Kafka output ---------------------------------- #output.kafka: # Boolean flag to enable or disable the output module. #enabled: true # The list of Kafka broker addresses from which to fetch the cluster metadata. # The cluster metadata contain the actual Kafka brokers events are published # to. #hosts: ["localhost:9092"] # The Kafka topic used for produced events. The setting can be a format string # using any event field. To set the topic from document type use `%{[type]}`. #topic: beats # The Kafka event key setting. Use format string to create a unique event key. # By default no event key will be generated. #key: '' # The Kafka event partitioning strategy. 
Default hashing strategy is `hash` # using the `output.kafka.key` setting or randomly distributes events if # `output.kafka.key` is not configured. #partition.hash: # If enabled, events will only be published to partitions with reachable # leaders. Default is false. #reachable_only: false # Configure alternative event field names used to compute the hash value. # If empty `output.kafka.key` setting will be used. # Default value is empty list. #hash: [] # Authentication details. Password is required if username is set. #username: '' #password: '' # Kafka version packetbeat is assumed to run against. Defaults to the "1.0.0". #version: '1.0.0' # Configure JSON encoding #codec.json: # Pretty-print JSON event #pretty: false # Configure escaping HTML symbols in strings. #escape_html: false # Metadata update configuration. Metadata contains leader information # used to decide which broker to use when publishing. #metadata: # Max metadata request retry attempts when cluster is in middle of leader # election. Defaults to 3 retries. #retry.max: 3 # Wait time between retries during leader elections. Default is 250ms. #retry.backoff: 250ms # Refresh metadata interval. Defaults to every 10 minutes. #refresh_frequency: 10m # Strategy for fetching the topics metadata from the broker. Default is true. #full: true # The number of concurrent load-balanced Kafka output workers. #worker: 1 # The number of times to retry publishing an event after a publishing failure. # After the specified number of retries, events are typically dropped. # Some Beats, such as Filebeat, ignore the max_retries setting and retry until # all events are published. Set max_retries to a value less than 0 to retry # until all events are published. The default is 3. #max_retries: 3 # The maximum number of events to bulk in a single Kafka request. The default # is 2048. #bulk_max_size: 2048 # The number of seconds to wait for responses from the Kafka brokers before # timing out. The default is 30s. 
#timeout: 30s # The maximum duration a broker will wait for number of required ACKs. The # default is 10s. #broker_timeout: 10s # The number of messages buffered for each Kafka broker. The default is 256. #channel_buffer_size: 256 # The keep-alive period for an active network connection. If 0s, keep-alives # are disabled. The default is 0 seconds. #keep_alive: 0 # Sets the output compression codec. Must be one of none, snappy and gzip. The # default is gzip. #compression: gzip # Set the compression level. Currently only gzip provides a compression level # between 0 and 9. The default value is chosen by the compression algorithm. #compression_level: 4 # The maximum permitted size of JSON-encoded messages. Bigger messages will be # dropped. The default value is 1000000 (bytes). This value should be equal to # or less than the broker's message.max.bytes. #max_message_bytes: 1000000 # The ACK reliability level required from broker. 0=no response, 1=wait for # local commit, -1=wait for all replicas to commit. The default is 1. Note: # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently # on error. #required_acks: 1 # The configurable ClientID used for logging, debugging, and auditing # purposes. The default is "beats". #client_id: beats # Enable SSL support. SSL is automatically enabled if any SSL setting is set. #ssl.enabled: true # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Configure SSL verification mode. If `none` is configured, all server hosts # and certificates will be accepted. In this mode, SSL based connections are # susceptible to man-in-the-middle attacks. Use only for testing. Default is # `full`. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to # 1.2 are enabled. 
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client Certificate Key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the Certificate Key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never #------------------------------- Redis output ---------------------------------- #output.redis: # Boolean flag to enable or disable the output module. #enabled: true # Configure JSON encoding #codec.json: # Pretty print json event #pretty: false # Configure escaping HTML symbols in strings. #escape_html: false # The list of Redis servers to connect to. If load-balancing is enabled, the # events are distributed to the servers in the list. If one server becomes # unreachable, the events are distributed to the reachable servers only. #hosts: ["localhost:6379"] # The name of the Redis list or channel the events are published to. The # default is packetbeat. #key: packetbeat # The password to authenticate to Redis with. The default is no authentication. #password: # The Redis database number where the events are published. The default is 0. #db: 0 # The Redis data type to use for publishing events. If the data type is list, # the Redis RPUSH command is used. If the data type is channel, the Redis # PUBLISH command is used. The default value is list. #datatype: list # The number of workers to use for each host configured to publish events to # Redis. Use this setting along with the loadbalance option. For example, if # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each # host). 
#worker: 1 # If set to true and multiple hosts or workers are configured, the output # plugin load balances published events onto all Redis hosts. If set to false, # the output plugin sends all events to only one host (determined at random) # and will switch to another host if the currently selected one becomes # unreachable. The default value is true. #loadbalance: true # The Redis connection timeout in seconds. The default is 5 seconds. #timeout: 5s # The number of times to retry publishing an event after a publishing failure. # After the specified number of retries, the events are typically dropped. # Some Beats, such as Filebeat, ignore the max_retries setting and retry until # all events are published. Set max_retries to a value less than 0 to retry # until all events are published. The default is 3. #max_retries: 3 # The number of seconds to wait before trying to reconnect to Redis # after a network error. After waiting backoff.init seconds, the Beat # tries to reconnect. If the attempt fails, the backoff timer is increased # exponentially up to backoff.max. After a successful connection, the backoff # timer is reset. The default is 1s. #backoff.init: 1s # The maximum number of seconds to wait before attempting to connect to # Redis after a network error. The default is 60s. #backoff.max: 60s # The maximum number of events to bulk in a single Redis request or pipeline. # The default is 2048. #bulk_max_size: 2048 # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The # value must be a URL with a scheme of socks5://. #proxy_url: # This option determines whether Redis hostnames are resolved locally when # using a proxy. The default value is false, which means that name resolution # occurs on the proxy server. #proxy_use_local_resolver: false # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. #ssl.enabled: true # Configure SSL verification mode. 
If `none` is configured, all server hosts # and certificates will be accepted. In this mode, SSL-based connections are # susceptible to man-in-the-middle attacks. Use only for testing. Default is # `full`. #ssl.verification_mode: full # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to # 1.2 are enabled. #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] # Optional SSL configuration options. SSL is off by default. # List of root certificates for HTTPS server verifications #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] # Certificate for SSL client authentication #ssl.certificate: "/etc/pki/client/cert.pem" # Client Certificate Key #ssl.key: "/etc/pki/client/cert.key" # Optional passphrase for decrypting the Certificate Key. #ssl.key_passphrase: '' # Configure cipher suites to be used for SSL connections #ssl.cipher_suites: [] # Configure curve types for ECDHE-based cipher suites #ssl.curve_types: [] # Configure what types of renegotiation are supported. Valid options are # never, once, and freely. Default is never. #ssl.renegotiation: never #------------------------------- File output ----------------------------------- #output.file: # Boolean flag to enable or disable the output module. #enabled: true # Configure JSON encoding #codec.json: # Pretty-print JSON event #pretty: false # Configure escaping HTML symbols in strings. #escape_html: false # Path to the directory where to save the generated files. The option is # mandatory. #path: "/tmp/packetbeat" # Name of the generated files. The default is `packetbeat` and it generates # files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. #filename: packetbeat # Maximum size in kilobytes of each file. When this size is reached, and on # every packetbeat restart, the files are rotated. The default value is 10240 # kB. #rotate_every_kb: 10000 # Maximum number of files under path. 
When this number of files is reached, # the oldest file is deleted and the rest are shifted from last to first. The # default is 7 files. #number_of_files: 7 # Permissions to use for file creation. The default is 0600. #permissions: 0600 #----------------------------- Console output --------------------------------- #output.console: # Boolean flag to enable or disable the output module. #enabled: true # Configure JSON encoding #codec.json: # Pretty-print JSON event #pretty: false # Configure escaping HTML symbols in strings. #escape_html: false #================================= Paths ====================================== # The home path for the packetbeat installation. This is the default base path # for all other path settings and for miscellaneous files that come with the # distribution (for example, the sample dashboards). # If not set by a CLI flag or in the configuration file, the default for the # home path is the location of the binary. #path.home: # The configuration path for the packetbeat installation. This is the default # base path for configuration files, including the main YAML configuration file # and the Elasticsearch template file. If not set by a CLI flag or in the # configuration file, the default for the configuration path is the home path. #path.config: ${path.home} # The data path for the packetbeat installation. This is the default base path # for all the files in which packetbeat needs to store its data. If not set by a # CLI flag or in the configuration file, the default for the data path is a data # subdirectory inside the home path. #path.data: ${path.home}/data # The logs path for a packetbeat installation. This is the default location for # the Beat's log files. If not set by a CLI flag or in the configuration file, # the default for the logs path is a logs subdirectory inside the home path. 
#path.logs: ${path.home}/logs #================================ Keystore ========================================== # Location of the Keystore containing the keys and their sensitive values. #keystore.path: "${path.config}/beats.keystore" #============================== Dashboards ===================================== {{ elk_macros.setup_dashboards('packetbeat') }} #============================== Template ===================================== {{ elk_macros.setup_template('packetbeat', inventory_hostname, data_nodes, elasticsearch_beat_settings) }} #============================== Setup ILM ===================================== # Configure Index Lifecycle Management. Index Lifecycle Management creates a # write alias and adds additional settings to the template. # The elasticsearch.output.index setting will be replaced with the write alias # if ILM is enabled. # Enable ILM support. Valid values are true, false, and auto. The beat will # detect availability of Index Lifecycle Management in Elasticsearch and enable # or disable ILM support. #setup.ilm.enabled: auto # Configure the ILM write alias name. #setup.ilm.rollover_alias: "packetbeat" # Configure rollover index pattern. 
#setup.ilm.pattern: "{now/d}-000001" {% if ilm_policy_name is defined %} setup.ilm.policy_name: "{{ ilm_policy_name }}" {% endif %} {% if ilm_policy_file_location is defined %} setup.ilm.policy_file: "{{ ilm_policy_file_location }}/{{ ilm_policy_filename }}" {% endif %} #============================== Kibana ===================================== {% if (groups['kibana'] | length) > 0 %} {{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} {% endif %} #================================ Logging ====================================== {{ elk_macros.beat_logging('packetbeat', packetbeat_log_level) }} #============================== Xpack Monitoring ===================================== {{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} #================================ HTTP Endpoint ====================================== # Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. # Stats can be accessed through http://localhost:5066/stats . For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false # The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. #http.host: localhost # Port on which the HTTP endpoint will bind. Default is 5066. #http.port: 5066 #============================= Process Security ================================ # Enable or disable seccomp system call filtering on Linux. Default is enabled. #seccomp.enabled: true #================================= Migration ================================== # This allows enabling 6.7 migration aliases #migration.6_to_7.enabled: false