Move most of the variables into the roles

Change-Id: I82a48c554c164c7166c1a0d4e3192332af5024fb
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
This commit is contained in:
Kevin Carter 2018-08-12 01:01:40 -05:00 committed by Kevin Carter (cloudnull)
parent 45df59ed7e
commit 8db0238749
16 changed files with 189 additions and 135 deletions

View File

@ -0,0 +1,19 @@
---
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# APM vars
apm_interface: 0.0.0.0
apm_port: 8200
apm_token: SuperSecrete

View File

@ -15,7 +15,7 @@
{% set action_items = [] -%}
{# Delete index loop #}
{% for key in elastic_beat_retention_policy_hosts.keys() -%}
{% for key in elastic_beat_retention_policy_keys -%}
{% set delete_indices = {} -%}
{% set index_retention = hostvars[inventory_hostname]['elastic_' + key + '_retention'] -%}
{% set _ = delete_indices.update(

View File

@ -15,7 +15,7 @@
client:
hosts:
- {{ ansible_host }}
port: 9200
port: {{ elastic_port }}
url_prefix: ""
use_ssl: false
ssl_no_validate: true

View File

@ -0,0 +1,17 @@
---
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This interface is used to determine cluster recovery speed.
elastic_data_interface: "{{ ansible_default_ipv4['alias'] }}"

View File

@ -20,3 +20,18 @@ q_mem: "{{ (ansible_memtotal_mb | int) // 3 }}"
# Option to define half memory
h_mem: "{{ (ansible_memtotal_mb | int) // 2 }}"
#define this in host/group vars as needed to mount remote filesystems
#set the client address as appropriate, eth1 assumes osa container mgmt network
#mountpoints and server paths are just examples
#elastic_shared_fs_repos:
# - fstype: nfs4
# src: "<nfs-server-ip>:/esbackup"
# opts: clientaddr="{{ ansible_eth1['ipv4']['address'] }}"
# path: "/elastic-backup"
# state: mounted
# EXPERIMENTAL - When the heap size for a given elastic node is greater than
# 4GiB the G1 garbage collector can be enabled. This is an
# experimental feature and may be removed later.
elastic_g1gc_enabled: false

View File

@ -14,3 +14,13 @@
# limitations under the License.
kibana_enable_basic_auth: false
# kibana vars
kibana_interface: 0.0.0.0
kibana_port: 5601
kibana_username: admin
kibana_password: admin
kibana_nginx_port: 81
kibana_server_name: "{{ ansible_hostname }}"
kibana_index_on_elasticsearch: "http://{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}:{{ elastic_port}}/.kibana"
kibana_elastic_request_timeout: 600000

View File

@ -31,3 +31,47 @@ logstash_syslog_input_enabled: false
# Changing this port to 514 will require overrides to the service files making
# logstash run as root (not recommended).
logstash_syslog_input_port: 1514
logstash_beat_input_port: 5044
logstash_deploy_filters: true
## Logstash config showing a complete kafka setup using SSL for authentication.
# logstash_kafka_options:
# codec: json
# topic_id: "elk_kafka"
# ssl_key_password: "{{ logstash_kafka_ssl_key_password }}"
# ssl_keystore_password: "{{ logstash_kafka_ssl_keystore_password }}"
# ssl_keystore_location: "/var/lib/logstash/{{ logstash_kafka_ssl_keystore_location | basename }}"
# ssl_truststore_location: "/var/lib/logstash/{{ logstash_kafka_ssl_truststore_location | basename }}"
# ssl_truststore_password: "{{ logstash_kafka_ssl_truststore_password }}"
# bootstrap_servers:
# - server1.local:9092
# - server2.local:9092
# - server3.local:9092
# client_id: "elk_metrics_6x"
# compression_type: "gzip"
# security_protocol: "SSL"
## The following variables are options that correspond to the
## `logstash_kafka_options` variable.
# logstash_kafka_ssl_key_password: "secrete"
# logstash_kafka_ssl_keystore_password: "secrete"
# logstash_kafka_ssl_truststore_password: "secrete"
# logstash_kafka_ssl_keystore_location: "/root/kafka/keystore.jks"
# logstash_kafka_ssl_truststore_location: "/root/kafka/truststore.jks"
## Setup servers that read events from the Smart Connector directly. This
## supports multiple entries in list format using the "host" and "port" for the
## smart connector.
# logstash_arcsight_smart_connectors:
# - host: 127.0.0.1
# port: 5000
logstash_arcsight_smart_connectors: []
## Setup servers to read events from the Event Broker Stream. This
## supports multiple entries in list format using the "host" and "port"
## for the event brokers.
# logstash_arcsight_event_brokers:
# - host: 127.0.0.1
# port: 5000
logstash_arcsight_event_brokers: []

View File

@ -0,0 +1,18 @@
---
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#metricbeats monitoring endpoints
elastic_metricbeat_rabbitmq_monitoring_hosts: '"localhost:15672"'
elastic_metricbeat_haproxy_monitoring_hosts: '"unix:///var/run/haproxy.stat"'

View File

@ -178,12 +178,12 @@ metricbeat.modules:
# namespace: example
#
##---------------------------- Elasticsearch Module ---------------------------
{% if inventory_hostname in (groups['elastic-logstash'] | union(groups['kibana']) | unique) %}
{% if inventory_hostname in (groups['elastic-logstash'] | union(groups['kibana'])) %}
- module: elasticsearch
metricsets: ["node", "node_stats"]
enabled: true
period: 30s
hosts: ["localhost:9200"]
hosts: ["localhost:{{ elastic_port }}"]
#
{% endif %}
##-------------------------------- Etcd Module --------------------------------

View File

@ -15,8 +15,8 @@
- name: add Elastic search public GPG key
apt_key:
url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
url: "{{ elastic_repo.key_url }}"
state: "present"
register: _apt_task
until: _apt_task is success
retries: 3
@ -26,9 +26,9 @@
- name: add elk repo to apt sources list
apt_repository:
repo: "{{ elk_repo.repo }}"
state: "{{ elk_repo.state }}"
filename: "{{ elk_repo.filename | default(omit) }}"
repo: "{{ elastic_repo.repo }}"
state: "{{ elastic_repo.state }}"
filename: "{{ elastic_repo.filename | default(omit) }}"
register: _apt_task
until: _apt_task is success
retries: 3

View File

@ -18,3 +18,9 @@ elastic_repo_distro_packages:
elastic_repo_ppas:
- "ppa:openjdk-r/ppa"
# elk apt repo
elastic_repo:
repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
key_url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"

View File

@ -15,3 +15,9 @@
elastic_repo_distro_packages:
- apt-transport-https
# elk apt repo
elastic_repo:
repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
key_url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"

View File

@ -14,3 +14,37 @@
# limitations under the License.
elastic_index_retention_algorithm: default
### Elastic curator variables
## Default retention policy options. All retention options are in days.
# elastic_logstash_retention: 1
# elastic_apm_retention: 1
# elastic_auditbeat_retention: 1
# elastic_filebeat_retention: 1
# elastic_heartbeat_retention: 1
# elastic_journalbeat_retention: 1
# elastic_metricbeat_retention: 1
# elastic_packetbeat_retention: 1
# This is used to calculate the storage a beat could generate per node, per day.
# This constant is used as a multiplier. If the expected storage is larger than
# the actual available storage after the buffer is calculated the multiplier
# will be doubled thereby cutting the potential storage days in half.
elastic_beat_storage_constant: 512
## If any retention policy option is undefined a dynamic fact will be generated.
## Fact will be generated for the general retention using the storage constant
## per node, per index, where a given collector is expected to be deployed. The
## equation used will take the total available storage from the ES data nodes
## subtract 25% divided by the total number of data nodes. That is then divided
## by number of hosts assumed to be a beat target which is multiplied by the
## storage constant.
elastic_beat_retention_policy_hosts:
logstash: "{{ groups['elastic-logstash'] | default([null]) | length }}"
apm: "{{ groups['apm-server'] | default([null]) | length }}"
auditbeat: "{{ (groups['hosts'] | default([null]) | length) * 2 }}"
filebeat: "{{ (groups['hosts'] | default([null]) | length) * 2 }}"
heartbeat: "{{ groups['kibana'][:3] | default([null]) | length }}"
journalbeat: "{{ (groups['all'] | default([null]) | length) * 1.5 }}"
metricbeat: "{{ (groups['all'] | default([null]) | length) * 1.5 }}"
packetbeat: "{{ (groups['hosts'] | default([null]) | length) * 5 }}"

View File

@ -33,3 +33,7 @@
when:
- hostvars[inventory_hostname]["elastic_" + item.key + "_retention"] is undefined
with_dict: "{{ elastic_beat_retention_policy_hosts }}"
- name: Set retention keys fact
set_fact:
elastic_beat_retention_policy_keys: "{{ elastic_beat_retention_policy_hosts.keys() }}"

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
cluster_name: openstack_elk
elastic_log_rotate_path: "/var/log/elasticsearch"
temp_dir: /var/lib/elasticsearch/tmp

View File

@ -1,53 +1,12 @@
# elk apt repo
elk_repo:
repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
---
#metricbeats monitoring endpoints
elastic_metricbeat_rabbitmq_monitoring_hosts: '"localhost:15672"'
elastic_metricbeat_haproxy_monitoring_hosts: '"unix:///var/run/haproxy.stat"'
# elastic search vars
apm_port: 8200
elastic_port: 9200
elastic_hap_port: 9201
cluster_name: openstack_elk
node_name: ${HOSTNAME}
elastic_data_interface: "{{ ansible_default_ipv4['alias'] }}"
### Elastic curator variables
## Default retention policy options. All retention options are in days.
# elastic_logstash_retention: 1
# elastic_apm_retention: 1
# elastic_auditbeat_retention: 1
# elastic_filebeat_retention: 1
# elastic_heartbeat_retention: 1
# elastic_journalbeat_retention: 1
# elastic_metricbeat_retention: 1
# elastic_packetbeat_retention: 1
# This is used to calculate the storage a beat could generate per node, per day.
# This constant is used as a multiplier. If the expected storage is larger than
# the actual available storage after the buffer is calculated the multiplier
# will be doubled thereby cutting the potential storage days in half.
elastic_beat_storage_constant: 512
## If any retention policy option is undefined a dynamic fact will be generated.
## Fact will be generated for the general retention using the storage constant
## per node, per index, where a given collector is expected to be deployed. The
## equation used will take the total available storage from the ES data nodes
## subtract 25% divided by the total number of data nodes. That is then divided
## by number of hosts assumed to be a beat target which is multiplied by the
## storage constant.
elastic_beat_retention_policy_hosts:
logstash: "{{ groups['elastic-logstash'] | default([null]) | length }}"
apm: "{{ groups['apm-server'] | default([null]) | length }}"
auditbeat: "{{ (groups['hosts'] | default([null]) | length) * 2 }}"
filebeat: "{{ (groups['hosts'] | default([null]) | length) * 2 }}"
heartbeat: "{{ groups['kibana'][:3] | default([null]) | length }}"
journalbeat: "{{ (groups['all'] | default([null]) | length) * 1.5 }}"
metricbeat: "{{ (groups['all'] | default([null]) | length) * 1.5 }}"
packetbeat: "{{ (groups['hosts'] | default([null]) | length) * 5 }}"
logstash_beat_input_port: 5044
logstash_syslog_input_port: 1514
kibana_port: 5601
kibana_nginx_port: 81
# This is the URL external services can use to communicate with the
# elasticsearch cluster.
@ -58,80 +17,6 @@ elastic_vip_url: >-
http://{{ hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ elastic_port }}
{% endif %}
#define this in host/group vars as needed to mount remote filesystems
#set the client address as appropriate, eth1 assumes osa container mgmt network
#mountpoints and server paths are just examples
#elastic_shared_fs_repos:
# - fstype: nfs4
# src: "<nfs-server-ip>:/esbackup"
# opts: clientaddr="{{ ansible_eth1['ipv4']['address'] }}"
# path: "/elastic-backup"
# state: mounted
# EXPERIMENTAL - When the heap size for a given elastic node is greater than
# 4GiB the G1 garbage collector can be enabled. This is an
# experimental feature and may be removed later.
elastic_g1gc_enabled: false
# kibana vars
kibana_interface: 0.0.0.0
kibana_port: 5601
kibana_username: admin
kibana_password: admin
kibana_nginx_port: 81
kibana_server_name: "{{ ansible_hostname }}"
kibana_index_on_elasticsearch: "http://{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}:{{ elastic_port}}/.kibana"
kibana_elastic_request_timeout: 600000
# logstash vars
logstash_beat_input_port: 5044
logstash_deploy_filters: true
## Logstash config showing a complete kafka setup using SSL for authentication.
# logstash_kafka_options:
# codec: json
# topic_id: "elk_kafka"
# ssl_key_password: "{{ logstash_kafka_ssl_key_password }}"
# ssl_keystore_password: "{{ logstash_kafka_ssl_keystore_password }}"
# ssl_keystore_location: "/var/lib/logstash/{{ logstash_kafka_ssl_keystore_location | basename }}"
# ssl_truststore_location: "/var/lib/logstash/{{ logstash_kafka_ssl_truststore_location | basename }}"
# ssl_truststore_password: "{{ logstash_kafka_ssl_truststore_password }}"
# bootstrap_servers:
# - server1.local:9092
# - server2.local:9092
# - server3.local:9092
# client_id: "elk_metrics_6x"
# compression_type: "gzip"
# security_protocol: "SSL"
## The following variables are options that correspond to the
## `logstash_kafka_options` variable.
# logstash_kafka_ssl_key_password: "secrete"
# logstash_kafka_ssl_keystore_password: "secrete"
# logstash_kafka_ssl_truststore_password: "secrete"
# logstash_kafka_ssl_keystore_location: "/root/kafka/keystore.jks"
# logstash_kafka_ssl_truststore_location: "/root/kafka/truststore.jks"
## Setup servers that read events from the Smart Connector directly. This
## supports multiple entries in list format using the "host" and "port" for the
## smart connector.
# logstash_arcsight_smart_connectors:
# - host: 127.0.0.1
# port: 5000
logstash_arcsight_smart_connectors: []
## Setup servers to read events from the Event Broker Stream. This
## supports multiple entries in list format using the "host" and "port"
## for the event brokers.
# logstash_arcsight_event_brokers:
# - host: 127.0.0.1
# port: 5000
logstash_arcsight_event_brokers: []
# APM vars
apm_interface: 0.0.0.0
apm_port: 8200
# Beat options
heartbeat_services:
- group: "{{ groups['galera_all'] | default([]) }}"
@ -362,11 +247,6 @@ heartbeat_services:
method: HEAD
path: "/"
# apm
apm_token: SuperSecrete
# Grafana
grafana_dashboards:
- dashboard_id: 5566