Add playbooks to install ELK stack with Topbeat

This set of playbooks installs an Elasticsearch cluster,
Logstash, and a Kibana dashboard inside containers,
and then installs Topbeat across your cloud to ship system
metrics to the Elasticsearch cluster.

Change-Id: I0c8c853ee48bd9278bd7b08719be4bde5f8c3df6
Ala Raddaoui 2016-10-07 04:05:08 +00:00 committed by Jesse Pretorius
parent eacb01c0a0
commit b7a6ceb086
19 changed files with 672 additions and 0 deletions

elk_metrics/conf.d/elk.yml Normal file

@@ -0,0 +1,10 @@
elastic-logstash_hosts:
  logging01:
    ip: 172.22.8.27
  logging02:
    ip: 172.22.8.28
  logging03:
    ip: 172.22.8.29
kibana_hosts:
  logging01:
    ip: 172.22.8.27

elk_metrics/env.d/elk.yml Normal file

@@ -0,0 +1,38 @@
---
---
component_skel:
  elastic-logstash:
    belongs_to:
      - elk_all
  kibana:
    belongs_to:
      - elk_all

container_skel:
  elastic-logstash_container:
    belongs_to:
      - elastic-logstash_containers
    contains:
      - elastic-logstash
    properties:
      container_fs_size: 150G
  kibana_container:
    belongs_to:
      - kibana_containers
    contains:
      - kibana
    properties:
      container_fs_size: 10G

physical_skel:
  elastic-logstash_containers:
    belongs_to:
      - all_containers
  kibana_containers:
    belongs_to:
      - all_containers
  elastic-logstash_hosts:
    belongs_to:
      - hosts
  kibana_hosts:
    belongs_to:
      - hosts

elk_metrics/installElastic.yml Normal file

@@ -0,0 +1,45 @@
---
- name: Install Elasticsearch
  hosts: "{{ elk_hosts }}"
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Elasticsearch datapath bind mount
      lxc_container:
        name: "{{ inventory_hostname }}"
        container_command: |
          [[ ! -d "/var/lib/elasticsearch" ]] && mkdir -p "/var/lib/elasticsearch"
          [[ ! -d "/var/lib/elasticsearch-olddata" ]] && mkdir -p "/var/lib/elasticsearch-olddata"
        container_config:
          - "lxc.mount.entry=/openstack/{{ inventory_hostname }} var/lib/elasticsearch none bind 0 0"
      delegate_to: "{{ physical_host }}"
    - name: Add Oracle Java PPA to apt sources list
      apt_repository: repo='ppa:webupd8team/java' state=present
    - name: Accept Java 8 license
      debconf: name='oracle-java8-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
    - name: Ensure Java is installed
      apt: name=oracle-java8-installer state=present install_recommends=yes update_cache=yes
    - name: Add Elasticsearch public GPG key
      apt_key:
        url: "https://packages.elastic.co/GPG-KEY-elasticsearch"
        state: "present"
    - name: Add Elasticsearch repo to apt sources list
      apt_repository: repo='deb http://packages.elastic.co/elasticsearch/2.x/debian stable main' state=present
    - name: Ensure Elasticsearch is installed
      apt: name=elasticsearch state=present update_cache=yes
    - name: Drop Elasticsearch conf file
      template:
        src: templates/elasticsearch.yml.j2
        dest: /etc/elasticsearch/elasticsearch.yml
    - name: Enable and restart elasticsearch
      service:
        name: "elasticsearch"
        enabled: true
        state: restarted
    - name: Copy elk-data rotater script
      copy: src=templates/rotate-topbeatdata.sh dest=/root/rotate-topbeatdata.sh mode=0755
      when: node_data | bool
    - name: Setup a cron job to run the topbeat-data rotater script daily
      cron: name="compress old topbeat data" minute="55" hour="23" job="/root/rotate-topbeatdata.sh"
      when: node_data | bool

elk_metrics/installKibana.yml Normal file

@@ -0,0 +1,27 @@
---
- name: Install Kibana
  hosts: kibana
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Add Elasticsearch public GPG key
      apt_key:
        url: "https://packages.elastic.co/GPG-KEY-elasticsearch"
        state: "present"
    - name: Add Kibana repo to apt sources list
      apt_repository: repo='deb http://packages.elastic.co/kibana/4.4/debian stable main' state=present
    - name: Ensure Kibana is installed
      apt: name=kibana state=present update_cache=yes
    - name: Drop Kibana conf file
      template:
        src: templates/kibana.yml.j2
        dest: /opt/kibana/config/kibana.yml
        mode: "u=rw,g=rw,o=rw"
    - name: Enable and restart kibana
      service:
        name: "kibana"
        enabled: true
        state: restarted

elk_metrics/installLogstash.yml Normal file

@@ -0,0 +1,40 @@
---
- name: Install Logstash
  hosts: elastic-logstash
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Add Logstash repo to apt sources list
      apt_repository: repo='deb http://packages.elastic.co/logstash/2.2/debian stable main' state=present
    - name: Ensure Logstash is installed
      apt: name=logstash state=present update_cache=yes
    - name: Drop Logstash conf for beats input
      template:
        src: templates/02-beats-input.conf.j2
        dest: /etc/logstash/conf.d/02-beats-input.conf
    - name: Drop Logstash conf for syslog filter
      template:
        src: templates/10-syslog-filter.conf.j2
        dest: /etc/logstash/conf.d/10-syslog-filter.conf
    - name: Drop Logstash conf for Elasticsearch output
      template:
        src: templates/30-elasticsearch-output.conf.j2
        dest: /etc/logstash/conf.d/30-elasticsearch-output.conf
    - name: Validate the Logstash configuration
      shell: service logstash configtest
      register: conf_success
    - debug: var=conf_success
    - name: Enable and restart logstash
      service:
        name: "logstash"
        enabled: true
        state: restarted

elk_metrics/installTopbeat.yml Normal file

@@ -0,0 +1,24 @@
---
- name: Install Topbeat
  hosts: hosts
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Add Topbeat repo to apt sources list
      apt_repository: repo='deb https://packages.elastic.co/beats/apt stable main' state=present
    - name: Add Elasticsearch public GPG key (also used for Topbeat)
      apt_key:
        url: "https://packages.elastic.co/GPG-KEY-elasticsearch"
        state: "present"
    - name: Ensure Topbeat is installed
      apt: name=topbeat state=present update_cache=yes
    - name: Drop Topbeat conf file
      template:
        src: templates/topbeat.yml.j2
        dest: /etc/topbeat/topbeat.yml
    - name: Enable and restart topbeat
      service:
        name: "topbeat"
        enabled: true
        state: restarted

elk_metrics/loadKibana.yml Normal file

@@ -0,0 +1,25 @@
---
- name: Load Beats dashboards into Kibana
  hosts: kibana
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Download sample dashboards
      get_url:
        url: https://download.elastic.co/beats/dashboards/beats-dashboards-1.1.0.zip
        dest: /root/
    - name: Ensure unzip is installed
      apt: name=unzip state=present
    - name: Extract the dashboards archive
      command: unzip -o /root/beats-dashboards-1.1.0.zip -d /root/
    - name: Load the sample dashboards
      command: ./load.sh
      args:
        chdir: /root/beats-dashboards-1.1.0
    - name: Copy the Topbeat index template for Elasticsearch
      copy: src=templates/topbeat.template.json dest=/root mode=0644
    - name: Load the index template into Elasticsearch
      command: "curl -XPUT 'http://localhost:{{ elastic_port }}/_template/topbeat' -d@topbeat.template.json"
      args:
        chdir: /root/

elk_metrics/readme.rst Normal file

@@ -0,0 +1,88 @@
Install ELK stack with Topbeat to gather metrics
#################################################

:tags: openstack, ansible

About this repository
---------------------

This set of playbooks will deploy an ELK cluster (Elasticsearch, Logstash,
Kibana) with Topbeat to gather metrics from hosts and ship them to the ELK
cluster.

Process
-------

Clone the elk-osa repo

.. code-block:: bash

    cd /opt
    git clone https://github.com/openstack/openstack-ansible-ops

Copy the env.d file into place

.. code-block:: bash

    cd openstack-ansible-ops
    cp env.d/elk.yml /etc/openstack_deploy/env.d/

Copy the conf.d file into place

.. code-block:: bash

    cp conf.d/elk.yml /etc/openstack_deploy/conf.d/

In **elk.yml**, list your logging hosts under ``elastic-logstash_hosts`` to
create the Elasticsearch cluster in multiple containers, and one logging
host under ``kibana_hosts`` to create the Kibana container (see the sample
layout below)

.. code-block:: bash

    vi /etc/openstack_deploy/conf.d/elk.yml
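
For example, with three logging hosts (the IPs below are illustrative and
match the sample ``conf.d/elk.yml`` shipped with this change), the layout
looks like

.. code-block:: yaml

    elastic-logstash_hosts:
      logging01:
        ip: 172.22.8.27
      logging02:
        ip: 172.22.8.28
      logging03:
        ip: 172.22.8.29
    kibana_hosts:
      logging01:
        ip: 172.22.8.27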

Create the containers

.. code-block:: bash

    cd /opt/openstack-ansible-playbooks
    openstack-ansible lxc-containers-create.yml -e 'container_group=elastic-logstash:kibana'

Install master/data Elasticsearch nodes on the elastic-logstash containers

.. code-block:: bash

    cd /opt/openstack-ansible-ops
    openstack-ansible installElastic.yml -e elk_hosts=elastic-logstash -e node_master=true -e node_data=true

Install an Elasticsearch client on the kibana container to serve as a
loadbalancer for the Kibana backend server

.. code-block:: bash

    openstack-ansible installElastic.yml -e elk_hosts=kibana -e node_master=false -e node_data=false

Install Logstash on all the elastic containers

.. code-block:: bash

    openstack-ansible installLogstash.yml

Install Kibana on the kibana container

.. code-block:: bash

    openstack-ansible installKibana.yml

(Optional) Reverse proxy the kibana container to your loadbalancer host

.. code-block:: bash

    openstack-ansible reverseProxyKibana.yml

Load the Topbeat index template and sample dashboards into Elasticsearch and Kibana

.. code-block:: bash

    openstack-ansible loadKibana.yml

Install Topbeat everywhere to start shipping metrics to the Logstash instances

.. code-block:: bash

    openstack-ansible installTopbeat.yml --forks 100
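
Once Topbeat is deployed, a quick manual smoke test (not performed by these
playbooks; substitute the address of one of your elastic-logstash containers)
is to query Elasticsearch directly and check that the cluster is healthy and
that ``topbeat-*`` indices are being created

.. code-block:: bash

    curl 'http://<elastic-ip>:9200/_cluster/health?pretty'
    curl 'http://<elastic-ip>:9200/_cat/indices?v'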

elk_metrics/reverseProxyKibana.yml Normal file

@@ -0,0 +1,25 @@
---
- name: Add reverse proxy to kibana dashboard
  hosts: haproxy_all
  become: true
  tags: nginx-setup
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Ensure Nginx is installed
      apt: name={{ item }} state=present update_cache=yes
      with_items:
        - nginx
        - apache2-utils
        - python-passlib
    - name: Create kibana user to access web interface
      htpasswd: path=/etc/nginx/htpasswd.users name={{ kibana_username }} password={{ kibana_password }} owner=root mode=0644
    - name: Drop Nginx default conf file
      template:
        src: templates/nginx_default.j2
        dest: /etc/nginx/sites-available/default
    - name: Enable and restart nginx
      service:
        name: "nginx"
        enabled: true
        state: restarted

elk_metrics/templates/02-beats-input.conf.j2 Normal file

@@ -0,0 +1,5 @@
input {
  beats {
    port => {{ logstash_beat_input_port }}
  }
}

elk_metrics/templates/10-syslog-filter.conf.j2 Normal file

@@ -0,0 +1,13 @@
filter {
  if [type] == "syslog" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
  }
}
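# Worked example: a syslog line such as
#   Oct  7 04:05:08 logging01 sshd[1234]: Accepted publickey for root
# is split by the grok pattern above into syslog_timestamp="Oct  7 04:05:08",
# syslog_hostname="logging01", syslog_program="sshd", syslog_pid="1234" and
# syslog_message="Accepted publickey for root"; the date filter then sets
# @timestamp from syslog_timestamp.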

elk_metrics/templates/30-elasticsearch-output.conf.j2 Normal file

@@ -0,0 +1,9 @@
output {
  elasticsearch {
    hosts => {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] | union(groups['kibana']) %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_ssh_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
    sniffing => true
    manage_template => false
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
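# Rendering note: the Jinja expression on the hosts line gathers the
# ansible_ssh_host address of every elastic-logstash and kibana node and
# appends the Elasticsearch port, producing a quoted list along the lines of
#   hosts => ["<node1-ip>:9200", "<node2-ip>:9200", "<node3-ip>:9200"]
# (9200 being elastic_port from vars/variables.yml).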

elk_metrics/templates/elasticsearch.yml.j2 Normal file

@@ -0,0 +1,66 @@
# ---------------------------------- Cluster -----------------------------------
cluster.name: {{ cluster_name }}
# ------------------------------------ Node ------------------------------------
node.name: {{ node_name }}
# node.rack: r1
# ----------------------------------- Paths ------------------------------------
# Path to directory where to store the data (separate multiple locations by comma):
#
# path.data: /path/to/data
#
# Path to log files:
#
# path.logs: /path/to/logs
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
# bootstrap.memory_lock: true
#
# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
# available on the system and that the owner of the process is allowed to use this limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
network.host: {{ elastic_interface }}
# Set a custom port for HTTP:
http.port: {{ elastic_port }}
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
discovery.zen.ping.unicast.hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] | union(groups['kibana']) %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_ssh_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | join(', ') }}]
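# Rendering note: the Jinja expression above produces a plain list of the
# ansible_ssh_host addresses of all elastic-logstash and kibana nodes, e.g.
#   discovery.zen.ping.unicast.hosts: [<node1-ip>, <node2-ip>, <node3-ip>]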
node.master: {{ node_master | default(true) }}
node.data: {{ node_data | default(true) }}
#
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
#
# discovery.zen.minimum_master_nodes: 3
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
# gateway.recover_after_nodes: 3
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
#
# ---------------------------------- Various -----------------------------------
#
# Disable starting multiple nodes on a single system:
#
# node.max_local_storage_nodes: 1
#
# Require explicit names when deleting indices:
#
# action.destructive_requires_name: true

elk_metrics/templates/kibana.yml.j2 Normal file

@@ -0,0 +1,81 @@
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: {{ kibana_port }}
# This setting specifies the IP address of the back end server.
server.host: {{ kibana_interface }}
# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This setting
# cannot end in a slash.
# server.basePath: ""
# The maximum payload size in bytes for incoming server requests.
# server.maxPayloadBytes: 1048576
# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://localhost:{{ elastic_port }}"
# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
# elasticsearch.preserveHost: true
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
# kibana.index: ".kibana"
# The default application to load.
# kibana.defaultAppId: "discover"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
# elasticsearch.username: "user"
# elasticsearch.password: "pass"
# Paths to the PEM-format SSL certificate and SSL key files, respectively. These
# files enable SSL for outgoing requests from the Kibana server to the browser.
# server.ssl.cert: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
# elasticsearch.ssl.cert: /path/to/your/client.crt
# elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
# elasticsearch.ssl.ca: /path/to/your/CA.pem
# To disregard the validity of SSL certificates, change this setting's value to false.
# elasticsearch.ssl.verify: true
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
# elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
# elasticsearch.requestTimeout: 300000
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
# elasticsearch.shardTimeout: 0
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
# elasticsearch.startupTimeout: 5000
# Specifies the path where Kibana creates the process ID file.
# pid.file: /var/run/kibana.pid
# Enables you to specify a file where Kibana stores log output.
# logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
# logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
# logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
# logging.verbose: false

elk_metrics/templates/nginx_default.j2 Normal file

@@ -0,0 +1,17 @@
server {
    listen {{ nginx_port }};

    server_name {{ server_name }};

    auth_basic "Restricted Access";
    auth_basic_user_file /etc/nginx/htpasswd.users;

    location / {
        proxy_pass http://{{ kibana_private_ip }}:{{ kibana_port }};
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}

elk_metrics/templates/rotate-topbeatdata.sh Normal file

@@ -0,0 +1,6 @@
#!/bin/bash
# Compress topbeat index directories last modified five days ago and move the
# archives to the olddata directory to keep the Elasticsearch datapath small.
for d in $(find /var/lib/elasticsearch/openstack_elk/nodes/0/indices/ -maxdepth 1 -type d -mtime 5 | grep topbeat); do
    echo "$d"
    tar -zcvf "$d.tar.gz" "$d" && rm -r "$d"
    mv "$d.tar.gz" /var/lib/elasticsearch-olddata/
done

elk_metrics/templates/topbeat.template.json Normal file

@@ -0,0 +1,114 @@
{
  "mappings": {
    "_default_": {
      "_all": {
        "enabled": true,
        "norms": {
          "enabled": false
        }
      },
      "dynamic_templates": [
        {
          "template1": {
            "mapping": {
              "doc_values": true,
              "ignore_above": 1024,
              "index": "not_analyzed",
              "type": "{dynamic_type}"
            },
            "match": "*"
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "cpu": {
          "properties": {
            "system_p": {
              "doc_values": "true",
              "type": "float"
            },
            "user_p": {
              "doc_values": "true",
              "type": "float"
            }
          }
        },
        "fs": {
          "properties": {
            "used_p": {
              "doc_values": "true",
              "type": "float"
            }
          }
        },
        "load": {
          "properties": {
            "load1": {
              "doc_values": "true",
              "type": "float"
            },
            "load15": {
              "doc_values": "true",
              "type": "float"
            },
            "load5": {
              "doc_values": "true",
              "type": "float"
            }
          }
        },
        "mem": {
          "properties": {
            "actual_used_p": {
              "doc_values": "true",
              "type": "float"
            },
            "used_p": {
              "doc_values": "true",
              "type": "float"
            }
          }
        },
        "proc": {
          "properties": {
            "cpu": {
              "properties": {
                "user_p": {
                  "doc_values": "true",
                  "type": "float"
                }
              }
            },
            "mem": {
              "properties": {
                "rss_p": {
                  "doc_values": "true",
                  "type": "float"
                }
              }
            }
          }
        },
        "swap": {
          "properties": {
            "actual_used_p": {
              "doc_values": "true",
              "type": "float"
            },
            "used_p": {
              "doc_values": "true",
              "type": "float"
            }
          }
        }
      }
    }
  },
  "settings": {
    "index.refresh_interval": "5s"
  },
  "template": "topbeat-*"
}

elk_metrics/templates/topbeat.yml.j2 Normal file

@@ -0,0 +1,18 @@
input:
  period: 10
  procs: [".*"]
  stats:
    system: true
    proc: true
    filesystem: true

output:
  logstash:
    hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_ssh_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ logstash_beat_input_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]

shipper:

logging:
  files:
    rotateeverybytes: 10485760 # = 10MB
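# Rendering note: the hosts line above expands to every elastic-logstash
# node with the beats input port appended, e.g.
#   hosts: ["<logstash1-ip>:5044", "<logstash2-ip>:5044", "<logstash3-ip>:5044"]
# (5044 being logstash_beat_input_port from vars/variables.yml).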

elk_metrics/vars/variables.yml Normal file

@@ -0,0 +1,21 @@
# Elasticsearch vars
elastic_interface: "['_eth1_', '_local_']"
elastic_port: 9200
cluster_name: openstack_elk
node_name: ${HOSTNAME}
# kibana vars
kibana_interface: 0.0.0.0
kibana_port: 5601
kibana_username: kibanaadmin
kibana_password: secrete
nginx_port: 81
server_name: server_name
kibana_private_ip: "{{ hostvars[groups['kibana'][0]]['ansible_ssh_host'] }}"
logstash_ssl_self_signed_subject: "/C=US/ST=Texas/L=San Antonio/O=IT/CN={{ server_name }}/subjectAltName=IP.1={{ elk_server_private_ip }}"
logstash_beat_input_port: 5044