openstack-ansible-ops/elk_metrics_6x/roles/elasticsearch/templates/elasticsearch.yml.j2

# ---------------------------------- Cluster -----------------------------------
cluster.name: {{ cluster_name }}
# ------------------------------------ Node ------------------------------------
node.name: {{ ansible_nodename }}
# node.rack: r1
# ----------------------------------- Paths ------------------------------------
# Path to the directory where data is stored (separate multiple locations with commas):
#
# path.data: /path/to/data
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
# path.logs: /path/to/logs
#path.logs: /var/lib/elasticsearch/logs/
path.logs: /var/log/elasticsearch/
#
# Path to shared filesystem repos
#
# path.repo: ["/mount/backups", "/mount/longterm_backups"]
#
{% if elastic_shared_fs_repos is defined and elastic_shared_fs_repos|length > 0 %}
path.repo: {{ elastic_shared_fs_repos | json_query("[*].path") | to_json }}
{% endif %}
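# Illustrative rendering (hypothetical `elastic_shared_fs_repos` value): given
#   elastic_shared_fs_repos:
#     - path: /mount/backups
#     - path: /mount/longterm_backups
# the block above renders as:
#   path.repo: ["/mount/backups", "/mount/longterm_backups"]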
# Set the global default index store. More information on these settings can be
# found here:
# <https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html>
index.store.type: niofs
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
bootstrap.memory_lock: false
#
# Make sure that the JVM heap size (set via jvm.options or the `ES_JAVA_OPTS`
# environment variable in 6.x; `ES_HEAP_SIZE` is no longer honoured) is about
# half the memory available on the system and that the owner of the process is
# allowed to use this limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
network.host: ["127.0.0.1", "{{ ansible_host }}", "{{ ansible_hostname }}"]
# Set a custom port for HTTP:
http.port: {{ elastic_port }}
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when a new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
# Node definitions can be seen here:
# <https://www.elastic.co/guide/en/elasticsearch/reference/6.2/modules-node.html>
discovery.zen.ping.unicast.hosts: {{ zen_nodes | shuffle(seed=inventory_hostname) | to_json }}
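# Illustrative rendering: with zen_nodes = ["es1", "es2", "es3"], the line
# above renders as a JSON list in a per-host stable order, e.g.:
#   discovery.zen.ping.unicast.hosts: ["es2", "es3", "es1"]
# Seeding the shuffle with inventory_hostname keeps the order deterministic for
# each host across template runs while spreading discovery traffic across the
# cluster.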
# Prevent "split brain" by requiring a majority of master-eligible nodes
# (total number of master-eligible nodes / 2 + 1):
discovery.zen.minimum_master_nodes: {{ ((master_node_count | int) // 2) + 1 }}
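# Worked example: with master_node_count = 3, (3 // 2) + 1 = 2, so this renders:
#   discovery.zen.minimum_master_nodes: 2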
# The first master_node_count nodes in the cluster are marked as master-eligible
node.master: {{ elasticsearch_node_master | default(master_node) }}
# Every node in the master list, and every node after it, will be a data node
node.data: {{ elasticsearch_node_data | default(data_node) }}
# Ingest nodes can execute pre-processing pipelines. To override automatic
# determination, the option `elasticsearch_node_ingest` can be defined as a
# Boolean which will enable or disable ingest nodes. When using automatic
# determination, ingest nodes will follow data nodes.
#
# NOTE(cloudnull): The `search.remote.connect` setting follows the enablement
# of ingest nodes.
{% if elasticsearch_node_ingest is defined %}
node.ingest: {{ elasticsearch_node_ingest }}
search.remote.connect: {{ elasticsearch_node_ingest }}
{% else %}
node.ingest: {{ data_node }}
search.remote.connect: {{ data_node }}
{% endif %}
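# Illustrative override (standard Ansible host_vars, not something this role
# requires): to run a host as a coordinating-only node, set:
#   elasticsearch_node_master: false
#   elasticsearch_node_data: false
#   elasticsearch_node_ingest: false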
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
gateway.recover_after_nodes: {{ ((master_node_count | int) // 2) + 1 }}
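# Worked example: with master_node_count = 5, (5 // 2) + 1 = 3, so recovery is
# blocked until at least three nodes have started:
#   gateway.recover_after_nodes: 3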
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
#
# ---------------------------------- Various -----------------------------------
#
# Disable starting multiple nodes on a single system:
#
# node.max_local_storage_nodes: 1
#
# Require explicit names when deleting indices:
#
# action.destructive_requires_name: true
{% set processors = ((elastic_thread_pool_size | int) > 0) | ternary(elastic_thread_pool_size, 1) %}
{% if not (elastic_coordination_node | default(false)) | bool %}
# Thread pool settings. For more on this see the documentation at:
# <https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-threadpool.html>
thread_pool:
  index:
    queue_size: {{ (processors | int) * 256 }}
  get:
    queue_size: {{ (processors | int) * 256 }}
  write:
    queue_size: {{ (processors | int) * 512 }}
{% else %}
# The number of processors is automatically detected, and the thread pool
# settings are automatically set based on it. In some cases it can be useful to
# override the number of detected processors. This can be done by explicitly
# setting the `processors` setting. On Kibana hosts where Elasticsearch runs
# as a coordinating-only node, the processor count is limited.
processors: {{ processors }}
{% endif %}
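# Illustrative rendering: with elastic_thread_pool_size = 4, the
# non-coordination branch above renders:
#   thread_pool:
#     index:
#       queue_size: 1024
#     get:
#       queue_size: 1024
#     write:
#       queue_size: 2048
# while a coordination node instead renders only `processors: 4`.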
# Accepts either a percentage or a byte size value. Set to 20%, meaning that 20%
# of the total heap allocated to a node will be used as the indexing buffer size
# shared across all shards.
indices.memory.index_buffer_size: 20%
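# Worked example: on a node with a 4g heap, 20% yields roughly an 819mb
# indexing buffer shared across all shards on that node.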
# Connection throttling on recovery is limited to 20% of the detected interface
# speed with a cap of 750mb. This will improve search speeds and reduce general
# cluster pressure.
indices.recovery.max_bytes_per_sec: {{ elasticserch_interface_speed }}mb
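# Illustrative rendering (assuming the role supplies the already-computed 20%
# value): for a 10GbE interface (~1250 MB/s), 20% is 250, so this renders:
#   indices.recovery.max_bytes_per_sec: 250mb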
# ---------------------------------- X-Pack ------------------------------------
# X-Pack Monitoring
# https://www.elastic.co/guide/en/elasticsearch/reference/6.3/monitoring-settings.html
xpack.monitoring.collection.enabled: true
xpack.monitoring.collection.interval: 30s