Merge "Add capability to set node role"

This commit is contained in:
Zuul 2018-09-18 22:32:49 +00:00 committed by Gerrit Code Review
commit cf2e5dbdc3
7 changed files with 134 additions and 19 deletions

View File

@ -508,6 +508,56 @@ Overview of kibana custom dashboard
:align: center
Optional | Customize Elasticsearch cluster configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Cluster configuration can be augmented using several variables which will force
a node to use a given role.
Available roles are *data*, *ingest*, and *master*.
* ``elasticsearch_node_data``: This variable will override the automatic node
determination and set a given node to be a "data" node.
* ``elasticsearch_node_ingest``: This variable will override the automatic node
determination and set a given node to be an "ingest" node.
* ``elasticsearch_node_master``: This variable will override the automatic node
determination and set a given node to be a "master" node.
Example of setting override options within inventory:
.. code-block:: yaml
hosts:
children:
elastic-logstash:
hosts:
elk1:
ansible_host: 10.0.0.1
ansible_user: root
elasticsearch_node_master: true
elasticsearch_node_data: false
elasticsearch_node_ingest: false
elk2:
ansible_host: 10.0.0.2
ansible_user: root
elasticsearch_node_master: false
elasticsearch_node_data: true
elasticsearch_node_ingest: false
elk3:
ansible_host: 10.0.0.3
ansible_user: root
elasticsearch_node_master: false
elasticsearch_node_data: false
elasticsearch_node_ingest: true
elk4:
ansible_host: 10.0.0.4
ansible_user: root
With the following inventory settings **elk1** would be a master node, **elk2**
would be a data node, **elk3** would be an ingest node, and **elk4** would auto
select a role.
Upgrading the cluster
---------------------

View File

@ -52,7 +52,7 @@ if [[ ! -d "${ANSIBLE_EMBED_HOME}/repositories/ansible-config_template" ]]; then
popd
fi
if [[ ! -d "${ANSIBLE_EMBED_HOME}/repositories/openstack_ansible_plugins" ]]; then
if [[ ! -d "${ANSIBLE_EMBED_HOME}/repositories/openstack-ansible-plugins" ]]; then
mkdir -p "${ANSIBLE_EMBED_HOME}/repositories"
git clone https://git.openstack.org/openstack/openstack-ansible-plugins "${ANSIBLE_EMBED_HOME}/repositories/openstack-ansible-plugins"
pushd "${ANSIBLE_EMBED_HOME}/repositories/openstack-ansible-plugins"

View File

@ -21,7 +21,7 @@ storage_node_count: "{{ groups['elastic-logstash'] | length }}"
# 2 node clusters have 1 master-eligible node to avoid split-brain
# 3 node clusters have 3 master-eligible nodes
# >3 node clusters have (nodes // 2) eligible masters rounded up to the next odd number
master_node_count: |-
elastic_master_node_count: |-
{% set masters = 0 %}
{% if (storage_node_count | int) < 3 %}
{% set masters = 1 %}
@ -36,10 +36,76 @@ master_node_count: |-
{{ masters }}
# Assign node roles
# the first 'master_node_count' hosts in groups['elastic-logstash'] become master-eligible nodes
# the first 'master_node_count' and subsequent alternate hosts in groups['elastic-logstash'] become data nodes
data_nodes: "{{ (groups['elastic-logstash'][:master_node_count | int] + groups['elastic-logstash'][master_node_count | int::2]) }}"
master_nodes: "{{ groups['elastic-logstash'][:master_node_count | int] }}"
# the first 'elastic_master_node_count' hosts in groups['elastic-logstash'] become master-eligible nodes
# the first 'elastic_master_node_count' and subsequent alternate hosts in groups['elastic-logstash'] become data nodes
## While the data node group is dynamically chosen the override
## `elasticsearch_node_data` can be used to override the node type.
## Dynamic node inclusion will still work for all other nodes in the group.
_data_nodes: "{{ (groups['elastic-logstash'][:elastic_master_node_count | int] | union(groups['elastic-logstash'][elastic_master_node_count | int::2])) }}"
data_nodes: |-
{% set nodes = [] %}
{% for node in groups['elastic-logstash'] %}
{% if (hostvars[node]['elasticsearch_node_data'] is defined) and (hostvars[node]['elasticsearch_node_data'] | bool) %}
{% set _ = nodes.append(node) %}
{% elif (node in _data_nodes) %}
{% set _ = nodes.append(node) %}
{% endif %}
{% endfor %}
{{ nodes }}
## While the logstash node group is dynamically chosen the override
## `elasticsearch_node_ingest` can be used to override the node type.
## Dynamic node inclusion will still work for all other nodes in the group.
_logstash_nodes: "{{ data_nodes }}"
logstash_nodes: |-
{% set nodes = [] %}
{% for node in groups['elastic-logstash'] %}
{% if (hostvars[node]['elasticsearch_node_ingest'] is defined) and (hostvars[node]['elasticsearch_node_ingest'] | bool) %}
{% set _ = nodes.append(node) %}
{% elif (node in _logstash_nodes) %}
{% set _ = nodes.append(node) %}
{% endif %}
{% endfor %}
{{ nodes }}
## While the ingest node group is dynamically chosen the override
## `elasticsearch_node_ingest` can be used to override the node type.
## Dynamic node inclusion will still work for all other nodes in the group.
_ingest_nodes: "{{ data_nodes }}"
ingest_nodes: |-
{% set nodes = [] %}
{% for node in groups['elastic-logstash'] %}
{% if (hostvars[node]['elasticsearch_node_ingest'] is defined) and (hostvars[node]['elasticsearch_node_ingest'] | bool) %}
{% set _ = nodes.append(node) %}
{% elif (node in _ingest_nodes) %}
{% set _ = nodes.append(node) %}
{% endif %}
{% endfor %}
{{ nodes }}
## While the master node group is dynamically chosen the override
## `elasticsearch_node_master` can be used to override the node type.
## Dynamic node inclusion will still work for all other nodes in the group.
_master_nodes: "{{ groups['elastic-logstash'][:elastic_master_node_count | int] }}"
master_nodes: |-
{% set nodes = [] %}
{% for node in groups['elastic-logstash'] %}
{% if (nodes | length) <= (elastic_master_node_count | int) %}
{% if (hostvars[node]['elasticsearch_node_master'] is defined) and (hostvars[node]['elasticsearch_node_master'] | bool) %}
{% set _ = nodes.append(node) %}
{% endif %}
{% endif %}
{% endfor %}
{% for node in groups['elastic-logstash'] %}
{% if (nodes | length) <= (elastic_master_node_count | int) %}
{% if (node in _master_nodes) %}
{% set _ = nodes.append(node) %}
{% endif %}
{% endif %}
{% endfor %}
{{ nodes }}
master_node_count: "{{ master_nodes | length }}"
coordination_nodes: >-
{{
(groups['kibana'] | map('extract', hostvars, 'ansible_host') | list)
@ -90,7 +156,7 @@ elasticsearch_data_node_details: >-
}}
logstash_data_node_details: >-
{{
(data_nodes | map('extract', hostvars, 'ansible_host') | list) | map('regex_replace', '(.*)' ,'\1:' ~ logstash_beat_input_port) | list
(logstash_nodes | map('extract', hostvars, 'ansible_host') | list) | map('regex_replace', '(.*)' ,'\1:' ~ logstash_beat_input_port) | list
}}
# based on the assignment of roles to hosts, set per host booleans
@ -105,16 +171,14 @@ elastic_thread_pool_size: "{{ ((ansible_processor_count | int) >= 24) | ternary(
# ingest nodes.
elasticsearch_number_of_replicas: "{{ ((data_nodes | length) > 2) | ternary('2', ((data_nodes | length) > 1) | ternary('1', '0')) }}"
elasticsearch_data_hosts: |-
{% set nodes = elasticsearch_data_node_details %}
{% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
{% set data_hosts = elasticsearch_data_node_details | shuffle(seed=inventory_hostname) %}
{% if inventory_hostname in data_nodes %}
{% set _ = nodes.insert(0, '127.0.0.1:' ~ elastic_port) %}
{% set _ = data_hosts.insert(0, '127.0.0.1:' ~ elastic_port) %}
{% endif %}
{{ data_hosts }}
logstash_data_hosts: |-
{% set nodes = logstash_data_node_details %}
{% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
{% set data_hosts = logstash_data_node_details | shuffle(seed=inventory_hostname) %}
{% if inventory_hostname in data_nodes %}
{% set _ = nodes.insert(0, '127.0.0.1:' ~ logstash_beat_input_port) %}
{% set _ = data_hosts.insert(0, '127.0.0.1:' ~ logstash_beat_input_port) %}
{% endif %}
{{ data_hosts }}

View File

@ -8,7 +8,8 @@
-Xms{{ heap_size }}m
# Xmx represents the maximum size of total heap space
-Xmx{{ heap_size }}m
# Sets the thread stack size
-Xss1m
################################################################
## Expert settings

View File

@ -17,7 +17,7 @@
systemd:
name: "logstash"
enabled: true
state: restarted
state: "{{ (inventory_hostname in logstash_nodes) | ternary('restarted', 'stopped') }}"
daemon_reload: true
when:
- ansible_service_mgr == 'systemd'
@ -26,7 +26,7 @@
- name: Enable and restart logstash (upstart)
service:
name: "logstash"
state: restarted
state: "{{ (inventory_hostname in logstash_nodes) | ternary('restarted', 'stopped') }}"
enabled: yes
when:
- ansible_service_mgr == 'upstart'

View File

@ -231,14 +231,14 @@ path.logs: /var/log/logstash
xpack.monitoring.enabled: true
#xpack.monitoring.elasticsearch.username: logstash_system
#xpack.monitoring.elasticsearch.password: password
#xpack.monitoring.elasticsearch.url: ["https://es1:9200", "https://es2:9200"]
xpack.monitoring.elasticsearch.url: ["127.0.0.1:9200"]
#xpack.monitoring.elasticsearch.ssl.ca: [ "/path/to/ca.crt" ]
#xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
#xpack.monitoring.elasticsearch.ssl.truststore.password: password
#xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.monitoring.elasticsearch.ssl.keystore.password: password
#xpack.monitoring.elasticsearch.ssl.verification_mode: certificate
#xpack.monitoring.elasticsearch.sniffing: false
xpack.monitoring.elasticsearch.sniffing: false
xpack.monitoring.collection.interval: 30s
xpack.monitoring.collection.pipeline.details.enabled: true
#

View File

@ -11,4 +11,4 @@ LimitMEMLOCK=infinity
{% endif %}
# Number of File Descriptors
LimitNOFILE=131070
LimitNOFILE=65536