Thread pools should be based on processor counts

The current setup used the processor cores value from the Ansible facts,
which on a multi-core, single-socket system could result in a value of 1.
Using the processor count instead returns the logical processor count,
giving us a more performant setup when the compute power is present.

Change-Id: Ia5b63d45691f58e848d05cc4a4e5f353b993a347
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
Author: Kevin Carter <kevin.carter@rackspace.com>
Date:   2018-08-25 00:21:04 -05:00
commit ce9007cda5 (parent 42603ab112)
7 changed files with 17 additions and 12 deletions


@@ -96,7 +96,10 @@ logstash_data_node_details: >-
 # based on the assignment of roles to hosts, set per host booleans
 master_node: "{{ (inventory_hostname in master_nodes) | ternary(true, false) }}"
 data_node: "{{ (inventory_hostname in data_nodes) | ternary(true, false) }}"
-elastic_thread_pool_size: "{{ ((ansible_processor_cores | int) >= 24) | ternary(24, ansible_processor_cores) }}"
+elastic_processors_floor: "{{ ((ansible_processor_count | int) - 1) }}"
+elastic_processors_floor_set: "{{ ((elastic_processors_floor | int) > 0) | ternary(elastic_processors_floor, 1) }}"
+elastic_thread_pool_size: "{{ ((ansible_processor_count | int) >= 24) | ternary(23, elastic_processors_floor_set) }}"
 # Set a data node facts. The data nodes, in the case of elasticsearch are also
 # ingest nodes.
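
A minimal sketch, not part of the commit, of how the old and new expressions
evaluate. The fact values are illustrative assumptions that follow the commit
message's framing, in which the count fact reports the logical processor count
while the cores fact can degenerate to 1 on a single-socket machine:

```python
# Illustrative fact values only; they follow the commit message's framing,
# not any particular host's Ansible output.
ansible_processor_cores = 1    # assumed degenerate single-socket value
ansible_processor_count = 16   # assumed logical processor count

# Old expression: ternary(cores >= 24, 24, cores)
old_pool = 24 if ansible_processor_cores >= 24 else ansible_processor_cores

# New expressions: (count - 1) clamped to at least 1, capped at 23 on
# hosts with 24 or more logical processors.
floor = ansible_processor_count - 1
floor_set = floor if floor > 0 else 1
new_pool = 23 if ansible_processor_count >= 24 else floor_set

print(old_pool, new_pool)  # 1 15
```

The `- 1` floor appears to leave one processor's worth of headroom for the
rest of the system, and the clamp to 1 guards against a zero-sized pool on
very small hosts.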


@@ -17,7 +17,7 @@ temp_dir: /var/lib/logstash/tmp
 logstash_pipelines: "{{lookup('template', 'logstash-pipelines.yml.j2') }}"
 # Set processor cores fact
-q_storage: "{{ (ansible_processor_cores | int) * (ansible_processor_threads_per_core | int) * 2 }}"
+q_storage: "{{ (ansible_processor_count | int) * (ansible_processor_threads_per_core | int) * 2 }}"
 # Set logstash facts
 logstash_queue_size: "{{ ((((q_storage | int) >= 2) | ternary(q_storage, 2) | int) * 1024) // ((logstash_pipelines | from_yaml) | length) }}"
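
The queue sizing that consumes `q_storage` is unchanged, so the switch only
alters the input value. A quick sketch of the arithmetic, with an assumed host
shape and pipeline count (neither value is from the commit):

```python
# Assumed host shape and pipeline count, for illustration only.
ansible_processor_count = 4
ansible_processor_threads_per_core = 2
pipeline_count = 4  # assumed length of the rendered logstash_pipelines list

q_storage = ansible_processor_count * ansible_processor_threads_per_core * 2  # 16

# Clamp q_storage to at least 2 (the ternary in the template), scale by
# 1024, then split evenly across pipelines with integer division.
logstash_queue_size = (max(q_storage, 2) * 1024) // pipeline_count
print(q_storage, logstash_queue_size)  # 16 4096
```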


@@ -38,7 +38,10 @@ path.data: /var/lib/logstash
 #
 # This defaults to the number of the host's CPU cores.
 #
-{% set processors = ((elastic_thread_pool_size | int) > 0) | ternary(elastic_thread_pool_size, 1) %}
+{% set _h_processors = ((ansible_processor_count | int) // 2) %}
+{% set _processors = ((_h_processors | int) > 0) | ternary(_h_processors, 1) %}
+{% set processors = ((_processors | int) > 8) | ternary(8, _processors) %}
 pipeline.workers: {{ processors | int }}
 #
 # How many events to retrieve from inputs before sending to filters+workers
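
The template now derives `pipeline.workers` as half the processor count,
clamped to at least 1 and capped at 8. A small sketch of that selection across
a few assumed processor counts:

```python
# Mirrors the three {% set %} expressions from the template above.
def pipeline_workers(processor_count: int) -> int:
    half = processor_count // 2             # _h_processors
    half_set = half if half > 0 else 1      # _processors: never below 1
    return 8 if half_set > 8 else half_set  # processors: capped at 8

for count in (1, 4, 16, 48):
    print(count, pipeline_workers(count))   # 1 1 / 4 2 / 16 8 / 48 8
```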


@@ -19,4 +19,9 @@ elasticsearch_node_data: false
 elasticsearch_node_ingest: false
 elastic_coordination_node: true
 elastic_heap_size: "{{ (elastic_heap_size_default | int) // 3 }}"
-elastic_thread_pool_size: "{{ ((ansible_processor_cores | int) > 4) | ternary(4, (ansible_processor_cores // 2)) }}"
+# This variable is redefined because kibana runs elasticsearch but only in a
+# load balancer capacity.
+elastic_processors_half: "{{ ((ansible_processor_count | int) // 2) }}"
+elastic_processors_half_set: "{{ ((elastic_processors_half | int) > 0) | ternary(elastic_processors_half, 1) }}"
+elastic_thread_pool_size: "{{ ((elastic_processors_half_set | int) > 4) | ternary(4, elastic_processors_half_set) }}"
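
This is the same half-and-clamp pattern as the data-node change, but capped at
4 for the coordination-only role. Note that the old expression's false branch
(`ansible_processor_cores // 2`) could evaluate to 0 on a single-core host,
which the new `_set` clamp avoids:

```python
# Mirrors the redefined kibana-side variables above.
def coordination_pool(processor_count: int) -> int:
    half = processor_count // 2             # elastic_processors_half
    half_set = half if half > 0 else 1      # elastic_processors_half_set
    return 4 if half_set > 4 else half_set  # elastic_thread_pool_size

for count in (1, 2, 8, 24):
    print(count, coordination_pool(count))  # 1 1 / 2 1 / 8 4 / 24 4
```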


@@ -43,7 +43,7 @@ output.logstash:
 # less than or equal to 0 disables the splitting of batches. When splitting
 # is disabled, the queue decides on the number of events to be contained in a
 # batch.
-bulk_max_size: {{ (elastic_thread_pool_size | int) * 256 }}
+bulk_max_size: {{ (ansible_processor_count | int) * 256 }}
 {% if named_index is defined %}
 # Optional index name. The default index name is set to {{ named_index }}


@@ -45,7 +45,7 @@ xpack.monitoring.elasticsearch:
 # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
 # The default is 50.
-bulk_max_size: {{ (elastic_thread_pool_size | int) * 64 }}
+bulk_max_size: {{ (ansible_processor_count | int) * 64 }}
 # Configure http request timeout before failing an request to Elasticsearch.
 timeout: 120
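
Both beats configs move `bulk_max_size` from the derived thread pool size to
the raw processor count. A sketch under the same assumed values as above (an
old pool size that degenerated to 1 and a logical processor count of 16; both
are assumptions, not measured facts):

```python
elastic_thread_pool_size = 1   # assumed degenerate old value
ansible_processor_count = 16   # assumed logical processor count

# output.logstash config: multiplier of 256
print(elastic_thread_pool_size * 256, ansible_processor_count * 256)  # 256 4096
# xpack.monitoring config: multiplier of 64
print(elastic_thread_pool_size * 64, ansible_processor_count * 64)    # 64 1024
```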


@@ -547,12 +547,6 @@
         add_tag => ["apimetrics"]
       }
     }
-  } else if "swift-container" in [tags] {
-    grok {
-      match => {
-        "message" => "%{CISCOTIMESTAMP}%{SPACE}%{S3_REQUEST_LINE}%{SPACE}%{CISCOTIMESTAMP}%{SPACE}%{HOSTNAME}%{SPACE}%{PROG}%{SPACE}%{USER}%{SPACE}%{USERNAME}%{SPACE}%{NOTSPACE}%{SPACE}%{S3_REQUEST_LINE}%{SPACE}%{HTTPDUSER}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{INT}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{SECOND}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}"
-      }
-    }
   } else if "swift-account" in [tags] {
     grok {
       match => {