Subdivision of elastic-logstash group
It is currently assumed that the elasticsearch data nodes and the logstash
nodes are co-located within the same container during deployment of the ELK
stack. This commit removes that assumption by requiring specification of
separate elastic and logstash groups. This is particularly important where
instances of elasticsearch and logstash are separated for resource reasons, a
common setup for an elastic cluster.

The default setup for the elastic nodes has been simplified, with all
non-coordinating nodes being both data and ingest nodes unless otherwise
specified. This was previously defined algorithmically, with the elastic node
setup tightly coupled to the placement of the logstash instances.

Change-Id: Id06eb78b52705aefea9cfe1247f53bac58badd52
parent 498266e4c9
commit 4851e29e9c
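As an illustration of the new requirement (not part of the change itself), a minimal Ansible inventory that keeps the two groups on separate hosts might look like the following; the host names and addresses are placeholders:

  elastic:
    hosts:
      es1:
        ansible_host: 10.0.0.1
      es2:
        ansible_host: 10.0.0.2

  logstash:
    hosts:
      ls1:
        ansible_host: 10.0.0.3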
@@ -512,7 +512,7 @@ Optional | Customize Elasticsearch cluster configuration
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Cluster configuration can be augmented using several variables which will force
-a node to use a given role.
+a node to use a given role. By default all nodes are data and ingest eligible.
 
 Available roles are *data*, *ingest*, and *master*.
 
@@ -529,7 +529,7 @@ Example setting override options within inventory.
 
 hosts:
   children:
-    elastic-logstash:
+    elastic:
       hosts:
         elk1:
           ansible_host: 10.0.0.1
@@ -552,11 +552,15 @@ Example setting override options within inventory.
         elk4:
           ansible_host: 10.0.0.4
          ansible_user: root
+    logstash:
+      children:
+        elk3:
+        elk4:
 
 With the following inventory settings **elk1** would be a master node, **elk2**
-would be a data, **elk3** would be an ingest node, and **elk4** would auto
-select a role.
+would be a data, **elk3** would be an ingest node, and **elk4** would be both a
+data and an ingest node. **elk3** and **elk4** would become the nodes hosting
+logstash instances.
 
 Upgrading the cluster
 ---------------------
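For illustration only, one way the per-host overrides behind the elk1-elk4 description above could be expressed is sketched below; the exact variables a given deployment sets may differ, but the three override variables themselves are introduced later in this change:

  elk1:
    ansible_host: 10.0.0.1
    elasticsearch_node_master: true
    elasticsearch_node_data: false
    elasticsearch_node_ingest: false
  elk2:
    ansible_host: 10.0.0.2
    elasticsearch_node_ingest: false
  elk3:
    ansible_host: 10.0.0.3
    elasticsearch_node_data: false
  elk4:
    ansible_host: 10.0.0.4

Under the new defaults, elk4 needs no overrides at all to become both a data and an ingest node.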
@@ -12,7 +12,7 @@
 # limitations under the License.
 
 - name: Create/Setup known indexes in Elasticsearch
-  hosts: "elastic-logstash[0]"
+  hosts: "elastic[0]"
   become: true
 
   roles:
@@ -4,11 +4,13 @@ component_skel:
     belongs_to:
       - elk_all
       - apm_all
-  elastic-logstash:
+  elastic:
     belongs_to:
       - elk_all
       - elasticsearch
       - elasticsearch_all
+  logstash:
+    belongs_to:
       - logstash
       - logstash_all
   kibana:
@@ -32,7 +32,7 @@
     - apm-server
 
 - name: Setup apm-server rollup
-  hosts: elastic-logstash[0]
+  hosts: elastic[0]
   become: true
   vars:
     haproxy_ssl: false
@@ -29,7 +29,7 @@
     - beat-install
 
 - name: Setup auditbeat rollup
-  hosts: elastic-logstash[0]
+  hosts: elastic[0]
   become: true
   vars:
     haproxy_ssl: false
@@ -12,7 +12,7 @@
 # limitations under the License.
 
 - name: Install Elastic Search
-  hosts: "elastic-logstash:kibana"
+  hosts: elastic:kibana
   become: true
 
   vars_files:
@@ -29,7 +29,7 @@
     - beat-install
 
 - name: Setup filebeat rollup
-  hosts: elastic-logstash[0]
+  hosts: elastic[0]
   become: true
   vars:
     haproxy_ssl: false
@@ -46,7 +46,7 @@
     - beat-install
 
 - name: Setup heartbeat rollup
-  hosts: elastic-logstash[0]
+  hosts: elastic[0]
   become: true
   vars:
     haproxy_ssl: false
@@ -72,7 +72,7 @@
     - beat-install
 
 - name: Setup journalbeat rollup
-  hosts: elastic-logstash[0]
+  hosts: elastic[0]
   become: true
   vars:
     haproxy_ssl: false
@@ -12,7 +12,7 @@
 # limitations under the License.
 
 - name: Install Logstash
-  hosts: elastic-logstash
+  hosts: logstash
   become: true
   vars_files:
     - vars/variables.yml
@@ -30,7 +30,7 @@
     - beat-install
 
 - name: Setup metricbeat rollup
-  hosts: elastic-logstash[0]
+  hosts: elastic[0]
   become: true
   vars:
     haproxy_ssl: false
@@ -29,7 +29,7 @@
     - beat-install
 
 - name: Setup packetbeat rollup
-  hosts: elastic-logstash[0]
+  hosts: elastic[0]
   become: true
   vars:
     haproxy_ssl: false
@@ -14,12 +14,12 @@
 # limitations under the License.
 
 # storage node count is equal to the cluster size
-storage_node_count: "{{ groups['elastic-logstash'] | length }}"
+storage_node_count: "{{ groups['elastic'] | length }}"
 
 # the elasticsearch cluster elects one master from all those which are marked as master-eligible
 # 1 node cluster can only have one master
-# 2 node clusters have 1 master-eligable nodes to avoid split-brain
-# 3 node clusters have 3 master-eligable nodes
+# 2 node clusters have 1 master-eligible nodes to avoid split-brain
+# 3 node clusters have 3 master-eligible nodes
 # >3 node clusters have (nodes // 2) eligable masters rounded up to the next odd number
 elastic_master_node_count: |-
   {% set masters = 0 %}
@@ -35,83 +35,41 @@ elastic_master_node_count: |-
   {% endif %}
   {{ masters }}
 
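For illustration only (not part of the diff), the master-eligibility rules documented in the comments above work out to values such as these for hypothetical cluster sizes:

  # groups['elastic'] size 1 -> 1 master-eligible node
  # groups['elastic'] size 2 -> 1 master-eligible node (avoids split-brain)
  # groups['elastic'] size 3 -> 3 master-eligible nodes
  # groups['elastic'] size 6 -> 3 master-eligible nodes (6 // 2 = 3, already odd)
  # groups['elastic'] size 8 -> 5 master-eligible nodes (8 // 2 = 4, rounded up to the next odd number)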
-# Assign node roles
-# the first 'elastic_master_node_count' hosts in groups['elastic-logstash'] become master-eligible nodes
-# the first 'elastic_master_node_count' and subsequent alternate hosts in groups['elastic-logstash'] becomes data nodes
-## While the data node group is dynamically chosen the override
-## `elasticsearch_node_data` can be used to override the node type.
-## Dynamic node inclusion will still work for all other nodes in the group.
-_data_nodes: "{{ (groups['elastic-logstash'][:elastic_master_node_count | int] | union(groups['elastic-logstash'][elastic_master_node_count | int::2])) }}"
+## Assign node roles
+# By default, let all elastic cluster nodes be data unless overridden using elasticsearch_node_data: false
 data_nodes: |-
   {% set nodes = [] %}
-  {% for node in groups['elastic-logstash'] %}
-  {% if (hostvars[node]['elasticsearch_node_data'] is defined) and (hostvars[node]['elasticsearch_node_data'] | bool) %}
+  {% for node in groups['elastic'] %}
+  {% if not ((hostvars[node]['elasticsearch_node_data'] is defined) and (not (hostvars[node]['elasticsearch_node_data'] | bool))) %}
   {% set _ = nodes.append(node) %}
   {% endif %}
   {% endfor %}
-  {% for node in groups['elastic-logstash'] %}
-  {% if (nodes | length) <= (_data_nodes | length) %}
-  {% if (node in _data_nodes) %}
-  {% set _ = nodes.append(node) %}
-  {% endif %}
-  {% endif %}
-  {% endfor %}
   {{ nodes }}
 
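A worked example of the new data_nodes default (hypothetical hosts, not part of the diff): with groups['elastic'] set to elk1 through elk4 and only elk2 defining elasticsearch_node_data: false, the template above keeps every node except elk2:

  # Hypothetical inputs:
  #   groups['elastic']: ['elk1', 'elk2', 'elk3', 'elk4']
  #   host_vars for elk2: elasticsearch_node_data: false
  # Resulting data_nodes: ['elk1', 'elk3', 'elk4']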
-## While the logstash node group is dynamically chosen the override
-## `elasticsearch_node_ingest` can be used to override the node type.
-## Dynamic node inclusion will still work for all other nodes in the group.
-_logstash_nodes: "{{ data_nodes }}"
-logstash_nodes: |-
-  {% set nodes = [] %}
-  {% for node in groups['elastic-logstash'] %}
-  {% if (hostvars[node]['elasticsearch_node_ingest'] is defined) and (hostvars[node]['elasticsearch_node_ingest'] | bool) %}
-  {% set _ = nodes.append(node) %}
-  {% endif %}
-  {% endfor %}
-  {% for node in groups['elastic-logstash'] %}
-  {% if (nodes | length) <= (_logstash_nodes | length) %}
-  {% if (node in _logstash_nodes) %}
-  {% set _ = nodes.append(node) %}
-  {% endif %}
-  {% endif %}
-  {% endfor %}
-  {{ nodes }}
-
-## While the logstash node group is dynamically chosen the override
-## `elasticsearch_node_ingest` can be used to override the node type.
-## Dynamic node inclusion will still work for all other nodes in the group.
-_ingest_nodes: "{{ data_nodes }}"
+# By default, let all elastic cluster nodes be ingest unless overridden using elasticsearch_node_ingest: false
 ingest_nodes: |-
   {% set nodes = [] %}
-  {% for node in groups['elastic-logstash'] %}
-  {% if (hostvars[node]['elasticsearch_node_ingest'] is defined) and (hostvars[node]['elasticsearch_node_ingest'] | bool) %}
+  {% for node in groups['elastic'] %}
+  {% if not ((hostvars[node]['elasticsearch_node_ingest'] is defined) and (not (hostvars[node]['elasticsearch_node_ingest'] | bool))) %}
   {% set _ = nodes.append(node) %}
   {% endif %}
   {% endfor %}
-  {% for node in groups['elastic-logstash'] %}
-  {% if (nodes | length) <= (_ingest_nodes | length) %}
-  {% if (node in _ingest_nodes) %}
-  {% set _ = nodes.append(node) %}
-  {% endif %}
-  {% endif %}
-  {% endfor %}
   {{ nodes }}
 
 ## While the master node group is dynamically chosen the override
 ## `elasticsearch_node_master` can be used to override the node type.
 ## Dynamic node inclusion will still work for all other nodes in the group.
-_master_nodes: "{{ groups['elastic-logstash'][:elastic_master_node_count | int] }}"
+_master_nodes: "{{ groups['elastic'][:elastic_master_node_count | int] }}"
 master_nodes: |-
   {% set nodes = [] %}
-  {% for node in groups['elastic-logstash'] %}
+  {% for node in groups['elastic'] %}
   {% if (nodes | length) <= (elastic_master_node_count | int) %}
   {% if (hostvars[node]['elasticsearch_node_master'] is defined) and (hostvars[node]['elasticsearch_node_master'] | bool) %}
   {% set _ = nodes.append(node) %}
   {% endif %}
   {% endif %}
   {% endfor %}
-  {% for node in groups['elastic-logstash'] %}
+  {% for node in groups['elastic'] %}
   {% if (nodes | length) <= (elastic_master_node_count | int) %}
   {% if (node in _master_nodes) %}
   {% set _ = nodes.append(node) %}
@@ -125,7 +83,7 @@ coordination_nodes: |-
   {% if (groups['kibana'] | length) > 0 %}
   {% set c_nodes = groups['kibana'] %}
   {% else %}
-  {% set c_nodes = groups['elastic-logstash'] %}
+  {% set c_nodes = groups['elastic'] %}
   {% endif %}
   {{
     (elasticsearch_coordination_node_socket_addresses
@@ -135,7 +93,7 @@
 
 zen_nodes: >-
   {{
-    (groups['elastic-logstash'] | union(groups['kibana'])) | map('extract', hostvars, 'ansible_host') | list | shuffle(seed=inventory_hostname)
+    (groups['elastic'] | union(groups['kibana'])) | map('extract', hostvars, 'ansible_host') | list | shuffle(seed=inventory_hostname)
   }}
 
 elasticserch_interface_speed: |-
@@ -189,12 +147,11 @@ elasticsearch_data_node_details: >-
 logstash_data_node_details: >-
   {{
     logstash_data_node_socket_addresses
-    | default((logstash_nodes | map('extract', hostvars, 'ansible_host') | list)
+    | default((groups['logstash'] | map('extract', hostvars, 'ansible_host') | list)
     | map('regex_replace', '(.*)' ,'\1:' ~ logstash_beat_input_port) | list)
   }}
 
-
 # based on the assignment of roles to hosts, set per host booleans
 master_node: "{{ (inventory_hostname in master_nodes) | ternary(true, false) }}"
 data_node: "{{ (inventory_hostname in data_nodes) | ternary(true, false) }}"
@@ -212,7 +169,7 @@ elasticsearch_beat_settings:
   number_of_replicas: "{{ elasticsearch_number_of_replicas }}"
   max_docvalue_fields_search: "{{ elastic_max_docvalue_fields_search | default('100') }}"
 
-
+# Shuffled elasticsearch endpoints (with localhost if relevant) for use in beat config files
 elasticsearch_data_hosts: |-
   {% set data_hosts = elasticsearch_data_node_details | shuffle(seed=inventory_hostname) %}
   {% if inventory_hostname in data_nodes %}
@@ -220,9 +177,10 @@ elasticsearch_data_hosts: |-
   {% endif %}
   {{ data_hosts }}
 
+# Shuffled logstash endpoints (with localhost if relevant) for use in beat config files
 logstash_data_hosts: |-
   {% set data_hosts = logstash_data_node_details | shuffle(seed=inventory_hostname) %}
-  {% if inventory_hostname in data_nodes %}
+  {% if inventory_hostname in groups['logstash'] %}
   {% set _ = data_hosts.insert(0, '127.0.0.1:' ~ logstash_beat_input_port) %}
   {% endif %}
   {{ data_hosts }}
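A hypothetical rendering of logstash_data_hosts (values assumed purely for illustration; logstash_beat_input_port is taken to be 5044 here): on a host that is itself in groups['logstash'], the local endpoint is inserted at the front and the remaining logstash hosts follow in an order shuffled per inventory_hostname:

  # Assumed logstash hosts: 10.0.0.3 (this host) and 10.0.0.4
  # logstash_data_hosts -> ['127.0.0.1:5044', '10.0.0.4:5044', '10.0.0.3:5044']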
@@ -17,4 +17,4 @@ default_ilm_policy:
 default_ilm_policy_filename: "default-ilm-policy.json"
 default_ilm_policy_file_location: "/tmp"
 
-elastic_beat_no_proxy: "{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}"
+elastic_beat_no_proxy: "{{ hostvars[groups['elastic'][0]]['ansible_host'] }}"
@@ -22,7 +22,7 @@ kibana_username: admin
 kibana_password: admin
 kibana_nginx_port: 81
 kibana_server_name: "{{ ansible_hostname }}"
-kibana_index_on_elasticsearch: "http://{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}:{{ elastic_port}}/.kibana"
+kibana_index_on_elasticsearch: "http://{{ hostvars[groups['elastic'][0]]['ansible_host'] }}:{{ elastic_port}}/.kibana"
 kibana_elastic_request_timeout: 1800000
 
 # If unset, the protocol and port default to http and elastic_port
@@ -17,7 +17,7 @@
   systemd:
     name: "logstash"
     enabled: true
-    state: "{{ (inventory_hostname in logstash_nodes) | ternary('restarted', 'stopped') }}"
+    state: "restarted"
     daemon_reload: true
   when:
     - ansible_service_mgr == 'systemd'
@@ -26,7 +26,7 @@
 - name: Enable and restart logstash (upstart)
   service:
     name: "logstash"
-    state: "{{ (inventory_hostname in logstash_nodes) | ternary('restarted', 'stopped') }}"
+    state: "restarted"
     enabled: yes
   when:
     - ansible_service_mgr == 'upstart'
@@ -224,7 +224,7 @@ metricbeat.modules:
 # enabled: true
 #
 #---------------------------- Elasticsearch Module ---------------------------
-{% if inventory_hostname in (groups['elastic-logstash'] | union(groups['kibana'])) %}
+{% if inventory_hostname in (groups['elastic'] | union(groups['kibana'])) %}
 - module: elasticsearch
   metricsets:
     - node
@@ -485,7 +485,7 @@ metricbeat.modules:
 {% endif %}
 
 #------------------------------ Logstash Module ------------------------------
-{% if inventory_hostname in groups['elastic-logstash'] | default([]) %}
+{% if inventory_hostname in groups['logstash'] | default([]) %}
 - module: logstash
   metricsets: ["node", "node_stats"]
   enabled: true
@@ -26,7 +26,7 @@
     - always
 
 - name: Set elasticsearch variables
-  include_vars: "vars_{{ ((inventory_hostname in (groups['kibana'] | default([])) and not inventory_hostname in (groups['elastic-logstash']) | default([]))) | ternary('kibana', 'default') }}.yml"
+  include_vars: "vars_{{ ((inventory_hostname in (groups['kibana'] | default([])) and not inventory_hostname in (groups['elastic']) | default([]))) | ternary('kibana', 'default') }}.yml"
   tags:
     - always
 
@@ -12,7 +12,7 @@
 # limitations under the License.
 
 - name: Install Kibana Dashboards
-  hosts: "elastic-logstash"
+  hosts: elastic
   become: true
   vars_files:
     - vars/variables.yml
@@ -82,5 +82,5 @@
 - name: Upload Custom Openstack Log Dashboard
   shell: "/opt/elasticdump/node_modules/elasticdump/bin/elasticdump --input=/tmp/openstack-log-dashboard.json --output={{ kibana_index_on_elasticsearch }} --type=data"
   environment:
-    no_proxy: "{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}"
+    no_proxy: "{{ hostvars[groups['elastic'][0]]['ansible_host'] }}"
   run_once: yes
@@ -12,7 +12,7 @@
 # limitations under the License.
 
 - name: Dump cluster node info
-  hosts: elastic-logstash:kibana
+  hosts: elastic:logstash:kibana
   become: true
 
   vars_files:
@@ -46,7 +46,7 @@
       debug:
         msg: "Zen nodes: {{ zen_nodes }}"
 
-      delegate_to: elastic_logstash[0]
+      delegate_to: elastic[0]
       run_once: true
 
     # show per node cluster setup
@@ -255,7 +255,7 @@ setup.dashboards.enabled: false
 # These settings can be adjusted to load your own template or overwrite existing ones.
 
 # Set to false to disable template loading.
-setup.template.enabled: {{ host == data_nodes[0] }}
+setup.template.enabled: {{ host == data_nodes[0] | default(false) }}
 
 # Template name. By default the template name is "{{ beat_name }}-%{[beat.version]}"
 # The template name and pattern has to be set in case the elasticsearch index pattern is modified.
@@ -279,7 +279,7 @@ setup.template.fields: "${path.config}/fields.yml"
 #setup.template.json.name: ""
 
 # Overwrite existing template
-setup.template.overwrite: {{ host == data_nodes[0] }}
+setup.template.overwrite: {{ host == data_nodes[0] | default(false)}}
 
 {% set shards = 1 %}
 
@@ -31,7 +31,7 @@ all_containers:
     elastic_memory_lock: false
 
   children:
-    elastic-logstash:
+    elastic:
       children:
         kibana:
           hosts:
@@ -46,6 +46,9 @@ all_containers:
            elastic2:
              ansible_host: 172.29.236.102
              ansible_user: root
+    logstash:
+      children:
+        kibana:
 
 apm-server:
   hosts:
@@ -11,7 +11,11 @@ hosts:
 vars:
   physical_host: localhost
 
-elastic-logstash:
+elastic:
   hosts:
     localhost: {}
 
+logstash:
+  hosts:
+    localhost: {}
+
@@ -31,7 +31,7 @@
 
 
 - name: Test elasticsearch api
-  hosts: elastic-logstash
+  hosts: elastic
   gather_facts: false
   become: true
 
@@ -64,7 +64,7 @@
 
 
 - name: Test kibana api
-  hosts: elastic-logstash
+  hosts: elastic
   gather_facts: false
   become: true
 
@@ -91,7 +91,7 @@
 
 
 - name: Test logstash api
-  hosts: elastic-logstash
+  hosts: logstash
   gather_facts: false
   become: true
 
@@ -380,7 +380,7 @@ grafana_datasources:
 elastic_beats:
   logstash:
     make_index: true
-    hosts: "{{ groups['elastic-logstash'] | default([]) }}"
+    hosts: "{{ logstash | default([]) }}"
   apm:
     make_index: true
     timeFieldName: '@timestamp'