Add ELK stack and ELK client Ansible playbooks.

Fixes for this patchset:
- split out elk-openstack-client.yml to match what's done elsewhere

Fixes for patchset #11:
- split out filebeat into separate role for openstack clients
- update README.md to use elk-openstack-client.yml for this purpose
- cleanup filebeat.yml.j2 to use correct syntax (no need for " anymore)

Fixes for patchset #10:
- add SELinux boolean "httpd_can_network_connect"
- add libsemanage-python package dependency for booleans

Fixes for patchset #9:
- fix for RHEL7 clients, we need to specify remote EPEL rpm
- RHEL7 clients need rpm_key module to import EPEL GPG key
- switch to using uri module instead of curl for checking elasticsearch indices
- add python-httplib2 dependency (needed for uri module)
- use curl -XPOST instead of PUT for filebeat index template in elasticsearch

Fixes from patchset #7
- remove unneeded rpm usage, switch to yum module
- add logic to heapsize tuning so systems > 64G of memory will
  never exceed the 32G recommended heapsize
- logic fix for prepopulating local logs into logstash
- remove elasticsearch.yml, rpm provides this and we're not
  customizing it yet

Fixes from patchset #6:
- use yum repo Ansible module where we can
- remove unnecessary EPEL installation (only nginx needs it)
- disable EPEL repo after installation to avoid OpenStack breakage

This adds:

(ELK Server)
- Automated ELK stack deployment
- SSL client generation
- Heap size tuning (1/2 of available memory)
- Firewall port additions (depending on active or not)
  - Supports either firewalld or iptables-services
- Additional upstream Filebeat Kibana dashboards

(ELK Client)
- Sets up filebeat with appropriate SSL certificates
  - utilizes both hostnames and SubjectAltName support (for environments without
    DNS services).

(Usage)

ansible-playbook -i hosts install/elk.yml
ansible-playbook -i hosts install/elk-client.yml --extra-vars 'elk_server=X.X.X.X'

Change-Id: Iee29f985e0bbcdf706ad869f132d4c0f1593a6b6
This commit is contained in:
Will Foster 2016-04-08 16:08:50 +01:00
parent 86dba51a41
commit 21f1f28ab3
28 changed files with 1308 additions and 2 deletions

View File

@ -13,7 +13,7 @@ Table of Contents
Currently we support Ansible 1.9.4 within browbeat-venv and ansible 2.0 for installation.
Playbooks for:
* Installing Browbeat, collectd, connmon, graphite, grafana, and grafana dashboards
* Installing Browbeat, collectd, connmon, ELK stack and clients, graphite, grafana, and grafana dashboards
* Check overcloud for performance issues
* Tune overcloud for performance (Experimental)
* Adjust number of workers for cinder/keystone/neutron/nova
@ -58,7 +58,15 @@ Requires Ansible 2.0
```
# ansible-playbook -i hosts install/connmon.yml
```
##### Install ELK Stack
```
# ansible-playbook -i hosts install/elk.yml
```
##### Install ELK Clients
```
# ansible-playbook -i hosts install/elk-openstack-client.yml --extra-vars 'elk_server=X.X.X.X'
```
- elk_server variable will be generated after the ELK stack playbook runs
##### Install graphite service
```
# ansible-playbook -i hosts install/graphite.yml

View File

@ -148,6 +148,20 @@ echo "IMPORTANT: If you plan on deploying graphite and grafana, update hosts and
echo " the [graphite] and [grafana] hosts entries are updated with valid hosts."
echo " You will need to have passwordless access to root on these hosts."
echo "---------------------------"
echo "" | tee -a ${ansible_inventory_file}
echo "[elk]" | tee -a ${ansible_inventory_file}
echo "## example host entry." | tee -a ${ansible_inventory_file}
echo "#host-01" | tee -a ${ansible_inventory_file}
echo "" | tee -a ${ansible_inventory_file}
echo "[elk-client]" | tee -a ${ansible_inventory_file}
echo "## example host entry." | tee -a ${ansible_inventory_file}
echo "#host-02" | tee -a ${ansible_inventory_file}
echo "---------------------------"
echo "IMPORTANT: If you plan on deploying ELK and ELK clients, update hosts and make sure"
echo " the [elk] and [elk-client] hosts entries are updated with valid hosts."
echo " You will need to have passwordless access to root on these hosts."
echo "---------------------------"
# Before referencing a host in ~/.ssh/config, ensure correct permissions on ssh config file
chmod 0600 ${ssh_config_file}

View File

@ -0,0 +1,9 @@
---
#
# Playbook to install the ELK client (filebeat) on generic hosts
# (the original header said "ELK stack", which is elk.yml's job)
#
# Usage:
#   ansible-playbook -i hosts install/elk-client.yml --extra-vars 'elk_server=X.X.X.X'
#
- hosts: elk-client
  remote_user: root
  roles:
    - { role: filebeat }

View File

@ -0,0 +1,13 @@
---
#
# Playbook to install the ELK client (filebeat) on OpenStack nodes
#
- hosts: undercloud
  remote_user: "{{ local_remote_user }}"
  roles:
    - { role: filebeat }

- hosts: controller,compute,ceph
  remote_user: "{{ host_remote_user }}"
  roles:
    - { role: filebeat }

12
ansible/install/elk.yml Normal file
View File

@ -0,0 +1,12 @@
---
#
# Playbook to install the ELK stack (elasticsearch, logstash, nginx, kibana)
#
- hosts: elk
  remote_user: root
  roles:
    - { role: elasticsearch }
    - { role: logstash }
    - { role: nginx }
    - { role: kibana }

View File

@ -0,0 +1,88 @@
#!/bin/sh
# elasticsearch.in.sh -- environment setup sourced by the elasticsearch
# launcher; builds ES_CLASSPATH and JAVA_OPTS (heap, GC, logging) from
# ES_* environment variables.
# NOTE(review): this appears to be the stock file for the Elasticsearch
# 2.2.0 RPM; the browbeat elasticsearch role appends an ES_HEAP_SIZE line
# after ES_CLASSPATH= via lineinfile -- confirm against the packaged file
# before editing further.
# check in case a user was using this mechanism
if [ "x$ES_CLASSPATH" != "x" ]; then
cat >&2 << EOF
Error: Don't modify the classpath with ES_CLASSPATH. Best is to add
additional elements via the plugin mechanism, or if code must really be
added to the main classpath, add jars to lib/ (unsupported).
EOF
exit 1
fi
ES_CLASSPATH="$ES_HOME/lib/elasticsearch-2.2.0.jar:$ES_HOME/lib/*"
# Heap defaults to 8g unless overridden; ES_HEAP_SIZE (set by the Ansible
# role) forces min == max below.
if [ "x$ES_MIN_MEM" = "x" ]; then
ES_MIN_MEM=8g
fi
if [ "x$ES_MAX_MEM" = "x" ]; then
ES_MAX_MEM=8g
fi
if [ "x$ES_HEAP_SIZE" != "x" ]; then
ES_MIN_MEM=$ES_HEAP_SIZE
ES_MAX_MEM=$ES_HEAP_SIZE
fi
# min and max heap sizes should be set to the same value to avoid
# stop-the-world GC pauses during resize, and so that we can lock the
# heap in memory on startup to prevent any of it from being swapped
# out.
JAVA_OPTS="$JAVA_OPTS -Xms${ES_MIN_MEM}"
JAVA_OPTS="$JAVA_OPTS -Xmx${ES_MAX_MEM}"
# new generation
if [ "x$ES_HEAP_NEWSIZE" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -Xmn${ES_HEAP_NEWSIZE}"
fi
# max direct memory
if [ "x$ES_DIRECT_SIZE" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -XX:MaxDirectMemorySize=${ES_DIRECT_SIZE}"
fi
# set to headless, just in case
JAVA_OPTS="$JAVA_OPTS -Djava.awt.headless=true"
# Force the JVM to use IPv4 stack
if [ "x$ES_USE_IPV4" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true"
fi
# Add gc options. ES_GC_OPTS is unsupported, for internal testing
# Defaults below select ParNew + CMS with the initiating occupancy
# fraction pinned at 75%.
if [ "x$ES_GC_OPTS" = "x" ]; then
ES_GC_OPTS="$ES_GC_OPTS -XX:+UseParNewGC"
ES_GC_OPTS="$ES_GC_OPTS -XX:+UseConcMarkSweepGC"
ES_GC_OPTS="$ES_GC_OPTS -XX:CMSInitiatingOccupancyFraction=75"
ES_GC_OPTS="$ES_GC_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
fi
JAVA_OPTS="$JAVA_OPTS $ES_GC_OPTS"
# GC logging options -- enabled only when ES_GC_LOG_FILE is set.
if [ -n "$ES_GC_LOG_FILE" ]; then
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCTimeStamps"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDateStamps"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintClassHistogram"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintTenuringDistribution"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCApplicationStoppedTime"
JAVA_OPTS="$JAVA_OPTS -Xloggc:$ES_GC_LOG_FILE"
# Ensure that the directory for the log file exists: the JVM will not create it.
mkdir -p "`dirname \"$ES_GC_LOG_FILE\"`"
fi
# Causes the JVM to dump its heap on OutOfMemory.
JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError"
# The path to the heap dump location, note directory must exists and have enough
# space for a full heap dump.
#JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof"
# Disables explicit GC
JAVA_OPTS="$JAVA_OPTS -XX:+DisableExplicitGC"
# Ensure UTF-8 encoding by default (e.g. filenames)
JAVA_OPTS="$JAVA_OPTS -Dfile.encoding=UTF-8"
# Use our provided JNA always versus the system one
JAVA_OPTS="$JAVA_OPTS -Djna.nosys=true"

View File

@ -0,0 +1,6 @@
# Official Elastic yum repository for Elasticsearch 2.x.
# Fetched over HTTPS so repo metadata and the GPG key cannot be tampered
# with in transit (consistent with the filebeat role's repo file).
[elasticsearch-2.x]
name=Elasticsearch repository for 2.x packages
baseurl=https://packages.elastic.co/elasticsearch/2.x/centos
gpgcheck=1
gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1

View File

@ -0,0 +1,50 @@
---
#
# Install/run elasticsearch for browbeat
#
# Heap sizing follows the upstream guidance: half of system memory, but
# never more than 32G:
# https://www.elastic.co/guide/en/elasticsearch/guide/current/_limiting_memory_usage.html
#

- name: Copy elasticsearch yum repo file
  become: true
  copy:
    src: elasticsearch.repo
    dest: /etc/yum.repos.d/elasticsearch.repo
    owner: root
    group: root
    mode: "0644"

- name: Install elasticsearch and openjdk
  become: true
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - elasticsearch
    - java-openjdk-headless

- name: Check if system memory is greater than 64G
  debug:
    msg: "System memory is {{ ansible_memory_mb.real.total | int }} so setting heapsize to 32G upper limit"
  when: ansible_memory_mb.real.total|int >= 65536

# The two heap tasks below are mutually exclusive but must use distinct
# register names: a skipped task still records a result, so sharing one
# register would let the skipped task clobber the other's changed status.
- name: Apply heapsize tuning for systems with greater than 64G memory
  become: true
  lineinfile:
    dest: /usr/share/elasticsearch/bin/elasticsearch.in.sh
    line: "ES_HEAP_SIZE=32g"
    insertafter: "^ES_CLASSPATH="
  when: ansible_memory_mb.real.total|int >= 65536
  register: elasticsearch_heap_large

- name: Print extended documentation for heapsize tuning
  debug:
    msg: "Refer to https://www.elastic.co/guide/en/elasticsearch/guide/current/_limiting_memory_usage.html"
  when: ansible_memory_mb.real.total|int >= 65536

- name: Update elasticsearch startup with heap size
  become: true
  lineinfile:
    dest: /usr/share/elasticsearch/bin/elasticsearch.in.sh
    line: "ES_HEAP_SIZE={{ (ansible_memory_mb.real.total / 2) | int }}m"
    insertafter: "^ES_CLASSPATH="
  when: ansible_memory_mb.real.total|int < 65536
  register: elasticsearch_heap_small

# Restart (not just start) so a changed heap setting actually takes
# effect. The original guard "when: elasticsearch_updated != 0" compared
# the whole register dict to an integer and was therefore always true.
- name: Restart elasticsearch when heap settings changed
  become: true
  service:
    name: elasticsearch
    state: restarted
  ignore_errors: true
  when: elasticsearch_heap_large.changed or elasticsearch_heap_small.changed

- name: Setup elasticsearch service
  become: true
  service:
    name: elasticsearch
    state: started
    enabled: true

View File

@ -0,0 +1,6 @@
# Elastic Beats yum repository (provides the filebeat package).
# NOTE(review): the section id "elk-client" is unconventional for a Beats
# repo but only affects the repo id shown by yum -- confirm before renaming.
[elk-client]
name=Elastic FileBeat Repository
baseurl=https://packages.elastic.co/beats/yum/el/$basearch
enabled=1
gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
gpgcheck=1

View File

@ -0,0 +1,53 @@
---
#
# Install/run filebeat elk client for browbeat
#

- name: Copy filebeat yum repo file
  become: true
  copy:
    src: filebeat.repo
    dest: /etc/yum.repos.d/filebeat.repo
    owner: root
    group: root
    mode: "0644"

# rpm_key is idempotent and fails loudly on a bad key, unlike the old
# "command: rpm --import" with ignore_errors.
- name: Import filebeat GPG key
  become: true
  rpm_key:
    key: http://packages.elastic.co/GPG-KEY-elasticsearch
    state: present

- name: Install filebeat rpms
  become: true
  yum:
    name: filebeat
    state: present

- name: Generate filebeat configuration template
  become: true
  template:
    src: filebeat.yml.j2
    dest: /etc/filebeat/filebeat.yml
    owner: root
    group: root
    mode: "0644"
  register: filebeat_needs_restart

- name: Check ELK server SSL client certificate
  stat: path=/etc/pki/tls/certs/filebeat-forwarder.crt
  register: elk_client_ssl_cert_exists

# get_url replaces "shell: curl ... > file". The original guard
# "when: elk_client_ssl_cert_exists != 0" compared the register dict to
# an integer and was always true; test the stat result instead.
- name: Install ELK server SSL client certificate
  become: true
  get_url:
    url: "http://{{ elk_server }}:8080/filebeat-forwarder.crt"
    dest: /etc/pki/tls/certs/filebeat-forwarder.crt
  when: not elk_client_ssl_cert_exists.stat.exists

- name: Restart filebeat when its configuration changed
  become: true
  service:
    name: filebeat
    state: restarted
  ignore_errors: true
  when: filebeat_needs_restart.changed

- name: Setup filebeat service
  become: true
  service:
    name: filebeat
    state: started
    enabled: true

View File

@ -0,0 +1,383 @@
################### Filebeat Configuration Example #########################
############################# Filebeat ######################################
filebeat:
# List of prospectors to fetch data.
prospectors:
# Each - is a prospector. Below are the prospector specific configurations
-
# Paths that should be crawled and fetched. Glob based paths.
# To fetch all ".log" files from a specific level of subdirectories
# /var/log/*/*.log can be used.
# For each file found under this path, a harvester is started.
# Make sure not file is defined twice as this can lead to unexpected behaviour.
paths:
- /var/log/*.log
- /var/log/messages
# foreman
- /var/log/foreman/*.log
- /var/log/foreman-proxy/*.log
# openstack
- /var/log/nova/*.log
- /var/log/neutron/*.log
- /var/log/cinder/*.log
- /var/log/keystone/*.log
- /var/log/horizon/*.log
- /var/log/glance/*.log
- /var/log/mariadb/*.log
- /var/log/rabbitmq/*.log
- /var/log/mongodb/*.log
- /var/log/ceilometer/*.log
- /var/log/ceph/*.log
- /var/log/heat/*.log
- /var/log/openvswitch/*.log
- /var/log/pcsd/*.log
- /var/log/puppet/*.log
- /var/log/redis/*.log
- /var/log/glusterfs/*.log
- /var/log/swift/*.log
# Configure the file encoding for reading files with international characters
# following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
# Some sample encodings:
# plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
# hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
#encoding: plain
# Type of the files. Based on this the way the file is read is decided.
# The different types cannot be mixed in one prospector
#
# Possible options are:
# * log: Reads every line of the log file (default)
# * stdin: Reads the standard in
input_type: log
# Optional additional fields. These field can be freely picked
# to add additional information to the crawled log files for filtering
#fields:
# level: debug
# review: 1
# Set to true to store the additional fields as top level fields instead
# of under the "fields" sub-dictionary. In case of name conflicts with the
# fields added by Filebeat itself, the custom fields overwrite the default
# fields.
#fields_under_root: false
# Ignore files which were modified more then the defined timespan in the past
# Time strings like 2h (2 hours), 5m (5 minutes) can be used.
#ignore_older: 24h
# Type to be published in the 'type' field. For Elasticsearch output,
# the type defines the document type these entries should be stored
# in. Default: log
document_type: syslog
# Scan frequency in seconds.
# How often these files should be checked for changes. In case it is set
# to 0s, it is done as often as possible. Default: 10s
#scan_frequency: 10s
# Defines the buffer size every harvester uses when fetching the file
#harvester_buffer_size: 16384
# Setting tail_files to true means filebeat starts readding new files at the end
# instead of the beginning. If this is used in combination with log rotation
# this can mean that the first entries of a new file are skipped.
#tail_files: false
# Backoff values define how agressively filebeat crawls new files for updates
# The default values can be used in most cases. Backoff defines how long it is waited
# to check a file again after EOF is reached. Default is 1s which means the file
# is checked every second if new lines were added. This leads to a near real time crawling.
# Every time a new line appears, backoff is reset to the initial value.
#backoff: 1s
# Max backoff defines what the maximum backoff time is. After having backed off multiple times
# from checking the files, the waiting time will never exceed max_backoff idenependent of the
# backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
# file after having backed off multiple times, it takes a maximum of 10s to read the new line
#max_backoff: 10s
# The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
# the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
# The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
#backoff_factor: 2
# This option closes a file, as soon as the file name changes.
# This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause
# issues when the file is removed, as the file will not be fully removed until also Filebeat closes
# the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the
# same name can be created. Turning this feature on the other hand can lead to loss of data
# on rotate files. It can happen that after file rotation the beginning of the new
# file is skipped, as the reading starts at the end. We recommend to leave this option on false
# but lower the ignore_older value to release files faster.
#force_close_files: false
#-
# paths:
# - /var/log/apache/*.log
# type: log
#
# # Ignore files which are older then 24 hours
# ignore_older: 24h
#
# # Additional fields which can be freely defined
# fields:
# type: apache
# server: localhost
#-
# type: stdin
# paths:
# - "-"
# General filebeat configuration options
#
# Event count spool threshold - forces network flush if exceeded
#spool_size: 1024
# Defines how often the spooler is flushed. After idle_timeout the spooler is
# Flush even though spool_size is not reached.
#idle_timeout: 5s
# Name of the registry file. Per default it is put in the current working
# directory. In case the working directory is changed after when running
# filebeat again, indexing starts from the beginning again.
#registry_file: .filebeat
# Full Path to directory with additional prospector configuration files. Each file must end with .yml
# These config files must have the full filebeat config part inside, but only
# the prospector part is processed. All global options like spool_size are ignored.
# The config_dir MUST point to a different directory then where the main filebeat config file is in.
#config_dir:
###############################################################################
############################# Libbeat Config ##################################
# Base config file used by all other beats for using libbeat features
############################# Output ##########################################
# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.
output:
### Elasticsearch as output
#elasticsearch:
logstash:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify and additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
hosts: ["{{ elk_server }}:5044"]
bulk_max_size: 1024
# Optional protocol and basic auth credentials. These are deprecated.
#protocol: "https"
#username: "admin"
#password: "s3cr3t"
# Number of workers per Elasticsearch host.
#worker: 1
# Optional index name. The default is "filebeat" and generates
# [filebeat-]YYYY.MM.DD keys.
#index: "filebeat"
# Optional HTTP Path
#path: "/elasticsearch"
# Proxy server URL
# proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# Configure http request timeout before failing an request to Elasticsearch.
#timeout: 90
# The number of seconds to wait for new events between two bulk API index requests.
# If `bulk_max_size` is reached before this interval expires, addition bulk index
# requests are made.
#flush_interval: 1
# Boolean that sets if the topology is kept in Elasticsearch. The default is
# false. This option makes sense only for Packetbeat.
#save_topology: false
# The time to live in seconds for the topology information that is stored in
# Elasticsearch. The default is 15 seconds.
#topology_expire: 15
# tls configuration. By default is off.
tls:
# List of root certificates for HTTPS server verifications
certificate_authorities: ["/etc/pki/tls/certs/filebeat-forwarder.crt"]
# Certificate for TLS client authentication
#certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#certificate_key: "/etc/pki/client/cert.key"
# Controls whether the client verifies server certificates and host name.
# If insecure is set to true, all server host names and certificates will be
# accepted. In this mode TLS based connections are susceptible to
# man-in-the-middle attacks. Use only for testing.
#insecure: true
# Configure cipher suites to be used for TLS connections
#cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#curve_types: []
# Configure minimum TLS version allowed for connection to logstash
#min_version: 1.0
# Configure maximum TLS version allowed for connection to logstash
#max_version: 1.2
### Logstash as output
#logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]
# Number of workers per Logstash host.
#worker: 1
# Optional load balance the events between the Logstash hosts
#loadbalance: true
# Optional index name. The default index name depends on the each beat.
# For Packetbeat, the default is set to packetbeat, for Topbeat
# top topbeat and for Filebeat to filebeat.
#index: filebeat
# Optional TLS. By default is off.
#tls:
# List of root certificates for HTTPS server verifications
#certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for TLS client authentication
#certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#certificate_key: "/etc/pki/client/cert.key"
# Controls whether the client verifies server certificates and host name.
# If insecure is set to true, all server host names and certificates will be
# accepted. In this mode TLS based connections are susceptible to
# man-in-the-middle attacks. Use only for testing.
#insecure: true
# Configure cipher suites to be used for TLS connections
#cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#curve_types: []
### File as output
#file:
# Path to the directory where to save the generated files. The option is mandatory.
#path: "/tmp/filebeat"
# Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
#filename: filebeat
# Maximum size in kilobytes of each file. When this size is reached, the files are
# rotated. The default value is 10 MB.
#rotate_every_kb: 10000
# Maximum number of files under path. When this number of files is reached, the
# oldest file is deleted and the rest are shifted from last to first. The default
# is 7 files.
#number_of_files: 7
### Console output
# console:
# Pretty print json event
#pretty: false
############################# Shipper #########################################
shipper:
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this options is not defined, the hostname is used.
#name:
# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]
# Uncomment the following if you want to ignore transactions created
# by the server on which the shipper is installed. This option is useful
# to remove duplicates if shippers are installed on multiple servers.
#ignore_outgoing: true
# How often (in seconds) shippers are publishing their IPs to the topology map.
# The default is 10 seconds.
#refresh_topology_freq: 10
# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
# All the IPs will be deleted afterwards. Note, that the value must be higher than
# refresh_topology_freq. The default is 15 seconds.
#topology_expire: 15
# Configure local GeoIP database support.
# If no paths are not configured geoip is disabled.
#geoip:
#paths:
# - "/usr/share/GeoIP/GeoLiteCity.dat"
# - "/usr/local/var/GeoIP/GeoLiteCity.dat"
############################# Logging #########################################
# There are three options for the log ouput: syslog, file, stderr.
# Under Windos systems, the log files are per default sent to the file output,
# under all other system per default to syslog.
logging:
# Send all logging output to syslog. On Windows default is false, otherwise
# default is true.
#to_syslog: true
# Write all logging output to files. Beats automatically rotate files if rotateeverybytes
# limit is reached.
#to_files: false
# To enable logging to files, to_files option has to be set to true
files:
# The directory where the log files will written to.
#path: /var/log/mybeat
# The name of the files where the logs are written to.
#name: mybeat
# Configure log file size limit. If limit is reached, log file will be
# automatically rotated
rotateeverybytes: 10485760 # = 10MB
# Number of rotated log files to keep. Oldest files will be deleted first.
#keepfiles: 7
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are beat, publish, service
# Multiple selectors can be chained.
#selectors: [ ]
# Sets log level. The default log level is error.
# Available log levels are: critical, error, warning, info, debug
#level: error

View File

@ -0,0 +1,6 @@
# Official Elastic yum repository for Kibana 4.4.x.
# Fetched over HTTPS so repo metadata and the GPG key cannot be tampered
# with in transit (consistent with the filebeat role's repo file).
[kibana-4.4]
name=Kibana repository for 4.4.x packages
baseurl=https://packages.elastic.co/kibana/4.4/centos
gpgcheck=1
gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1

View File

@ -0,0 +1,6 @@
# Official Elastic yum repository for Logstash 2.2.
# Fetched over HTTPS so repo metadata and the GPG key cannot be tampered
# with in transit (consistent with the filebeat role's repo file).
[logstash-2.2]
name=logstash repository for 2.2 packages
baseurl=https://packages.elasticsearch.org/logstash/2.2/centos
gpgcheck=1
gpgkey=https://packages.elasticsearch.org/GPG-KEY-elasticsearch
enabled=1

View File

@ -0,0 +1,114 @@
---
#
# Install/run kibana for browbeat
#

- name: Copy kibana yum repo file
  become: true
  copy:
    src: kibana.repo
    dest: /etc/yum.repos.d/kibana.repo
    owner: root
    group: root
    mode: "0644"

# We need to insert data to create an initial index, query if one exists
- name: Check elasticsearch index for content
  uri:
    url: http://localhost:9200/_cat/indices
    method: GET
    return_content: yes
  register: elasticsearch_index

# Prime elasticsearch with this host's syslog so kibana can create its
# first index pattern.
- name: Populate elasticsearch index with local logs
  shell: cat /var/log/messages | /opt/logstash/bin/logstash -f /etc/logstash/conf.d/10-syslog.conf
  when: "'logstash-' not in elasticsearch_index.content"

- name: Install kibana rpms
  become: true
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - kibana
    - unzip

- name: Check kibana filebeat dashboards
  stat: path=/tmp/filebeat-dashboards.zip
  register: kibana_dashboards_present

# The original guards of the form "when: <register> != 0" compared the
# whole register dict to an integer and were therefore always true; each
# guard below tests the stat result it was clearly meant to test.
- name: Copy kibana filebeat dashboards
  become: true
  copy:
    src: filebeat-dashboards.zip
    dest: /tmp/filebeat-dashboards.zip
    owner: root
    group: root
    mode: "0644"
  ignore_errors: true
  when: not kibana_dashboards_present.stat.exists

- name: Install kibana filebeat dashboards
  unarchive:
    src: /tmp/filebeat-dashboards.zip
    dest: /tmp/
    copy: no
  ignore_errors: true
  when: not kibana_dashboards_present.stat.exists

- name: Configure kibana filebeat dashboards
  shell: sh /tmp/beats-dashboards-master/load.sh -url "http://localhost:9200" -user "admin:admin"
  ignore_errors: true

- name: Check kibana users
  stat: path=/etc/nginx/htpasswd.users
  register: kibana_user_pwfile_exists

- name: Create kibana admin user
  command: htpasswd -b -c /etc/nginx/htpasswd.users admin admin
  ignore_errors: true
  when: not kibana_user_pwfile_exists.stat.exists

- name: Setup kibana service
  become: true
  service:
    name: kibana
    state: started
    enabled: true

- name: Check Filebeat forwarder SSL certificate
  stat: path=/etc/pki/tls/certs/filebeat-forwarder.crt
  register: filebeat_forwarder_ssl_exists

- name: Create client forwarder SSL certificate
  command: >
    openssl req -subj '/CN={{ ansible_hostname }}/'
    -config /etc/pki/tls/openssl_extras.cnf
    -x509 -days 3650 -batch -nodes -newkey rsa:2048
    -keyout /etc/pki/tls/private/filebeat-forwarder.key
    -out /etc/pki/tls/certs/filebeat-forwarder.crt
  ignore_errors: true
  when: not filebeat_forwarder_ssl_exists.stat.exists

- name: Check Filebeat forwarder SSL certificate copy
  stat: path=/usr/share/nginx/html/filebeat-forwarder.crt
  register: filebeat_forwarder_ssl_client_copy_exists

# Publish the certificate under nginx's docroot so clients can fetch it
# over TCP/8080.
- name: Copy Filebeat forwarder SSL certificate
  command: cp /etc/pki/tls/certs/filebeat-forwarder.crt /usr/share/nginx/html/filebeat-forwarder.crt
  ignore_errors: true
  when: not filebeat_forwarder_ssl_client_copy_exists.stat.exists

- name: Refresh logstash service
  become: true
  service:
    name: logstash
    state: restarted
  ignore_errors: true

- name: Print SSL post-setup information
  debug: msg="Filebeat SSL Certificate available at http://{{ ansible_hostname }}:8080/filebeat-forwarder.crt"

- name: Print post-setup URL
  debug: msg="*** ELK Services available at http://{{ ansible_hostname }}/ ***"

- name: Print index creation instructions
  debug: msg="** 1) Navigate to http://{{ ansible_hostname }} and login with admin/admin, click 'create' on the green index button ***"

- name: Print filebeat openstack client setup instructions
  debug: msg="** 2) Run ansible-playbook -i hosts install/elk-openstack-client.yml --extra-vars 'elk_server={{ ansible_default_ipv4.address }}' to setup OpenStack clients ***"

# Renumbered from the duplicated "2)" so the printed steps read in order.
- name: Print filebeat client setup instructions
  debug: msg="** 3) Run ansible-playbook -i hosts install/elk-client.yml --extra-vars 'elk_server={{ ansible_default_ipv4.address }}' to setup clients ***"

View File

@ -0,0 +1,8 @@
# Legacy lumberjack (logstash-forwarder) input on TCP/5043, tagging
# incoming events with type "logs"; SSL uses the filebeat-forwarder
# certificate/key pair generated on the ELK server.
input {
  lumberjack {
    port => 5043
    type => "logs"
    ssl_certificate => "/etc/pki/tls/certs/filebeat-forwarder.crt"
    ssl_key => "/etc/pki/tls/private/filebeat-forwarder.key"
  }
}

View File

@ -0,0 +1,8 @@
# Filebeat (beats protocol) input on TCP/5044 with SSL enabled, using
# the filebeat-forwarder certificate/key pair generated on the ELK
# server.
input {
  beats {
    port => 5044
    ssl => true
    ssl_certificate => "/etc/pki/tls/certs/filebeat-forwarder.crt"
    ssl_key => "/etc/pki/tls/private/filebeat-forwarder.key"
  }
}

View File

@ -0,0 +1,14 @@
# Parse syslog-typed events: grok splits the raw line into timestamp,
# hostname, program, optional pid and message; syslog_pri decodes the
# facility/severity; date sets @timestamp from the parsed syslog
# timestamp (both one- and two-digit day forms).
filter {
if [type] == "syslog" {
grok {
match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
add_field => [ "received_at", "%{@timestamp}" ]
add_field => [ "received_from", "%{host}" ]
}
syslog_pri { }
date {
match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
}
}

View File

@ -0,0 +1,11 @@
# Read syslog lines from stdin (typed "syslog" so the grok filter
# applies) and ship them to the local elasticsearch, echoing each parsed
# event to stdout; used to prepopulate the index from /var/log/messages.
input {
  stdin {
    type => "syslog"
  }
}
output {
  stdout { codec => rubydebug }
  elasticsearch {
    hosts => "localhost:9200"
  }
}

View File

@ -0,0 +1,9 @@
# Ship beats events to the local elasticsearch, one index per beat per
# day (e.g. filebeat-YYYY.MM.DD); the index template is loaded
# separately, so template management is disabled here.
output {
  elasticsearch {
    hosts => ["localhost:9200"]
    sniffing => true
    manage_template => false
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}

View File

@ -0,0 +1,3 @@
# Minimal output: forward all events to the local elasticsearch instance.
output {
  elasticsearch {
    hosts => ["localhost:9200"]
  }
}

View File

@ -0,0 +1,49 @@
{
  "mappings": {
    "_default_": {
      "_all": {
        "enabled": true,
        "norms": {
          "enabled": false
        }
      },
      "dynamic_templates": [
        {
          "template1": {
            "mapping": {
              "doc_values": true,
              "ignore_above": 1024,
              "index": "not_analyzed",
              "type": "{dynamic_type}"
            },
            "match": "*"
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "message": {
          "type": "string",
          "index": "analyzed"
        },
        "offset": {
          "type": "long",
          "doc_values": true
        },
        "geoip": {
          "type": "object",
          "dynamic": true,
          "properties": {
            "location": {
              "type": "geo_point"
            }
          }
        }
      }
    }
  },
  "settings": {
    "index.refresh_interval": "5s"
  },
  "template": "filebeat-*"
}

View File

@ -0,0 +1,6 @@
# Official Elastic yum repository for Logstash 2.2.
# Fetched over HTTPS so repo metadata and the GPG key cannot be tampered
# with in transit (consistent with the filebeat role's repo file).
[logstash-2.2]
name=logstash repository for 2.2 packages
baseurl=https://packages.elasticsearch.org/logstash/2.2/centos
gpgcheck=1
gpgkey=https://packages.elasticsearch.org/GPG-KEY-elasticsearch
enabled=1

View File

@ -0,0 +1,167 @@
---
#
# Install/run logstash for browbeat
#
- name: Copy logstash yum repo file
copy:
src=logstash.repo
dest=/etc/yum.repos.d/logstash.repo
owner=root
group=root
mode=0644
become: true
- name: Install logstash rpms
yum: name={{ item }} state=present
become: true
with_items:
- logstash
- name: Copy logstash input filters
copy:
src=01-lumberjack-input.conf
dest=/etc/logstash/conf.d/01-lumberjack-input.conf
owner=root
group=root
mode=0644
become: true
- name: Copy logstash output filters
copy:
src=30-elasticsearch-output.conf
dest=/etc/logstash/conf.d/30-lumberjack-output.conf
owner=root
group=root
mode=0644
become: true
- name: Copy logstash syslog filters
copy:
src=10-syslog.conf
dest=/etc/logstash/conf.d/10-syslog.conf
owner=root
group=root
mode=0644
become: true
- name: Copy logstash local syslog filter
copy:
src=10-syslog-filter.conf
dest=/etc/logstash/conf.d/10-syslog-filter.conf
owner=root
group=root
mode=0644
become: true
register: logstash_needs_restart
- name: Copy filebeat input filter
copy:
src=02-beats-input.conf
dest=/etc/logstash/conf.d/02-beats-input.conf
owner=root
group=root
mode=0644
become: true
- name: Stage filebeat JSON index template
copy:
src=filebeat-index-template.json
dest=/tmp/filebeat-index-template.json
owner=root
group=root
mode=0644
become: true
# Render the extended OpenSSL config used to generate the ELK SSL cert
# with SubjectAltName support (for environments without DNS).
- name: Load OpenSSL CA Extended Configuration
  template:
    src: openssl_extras.cnf.j2
    dest: /etc/pki/tls/openssl_extras.cnf
    owner: root
    group: root
    mode: "0644"
  become: true
# Idempotency guard: count existing SAN entries for this host's IP so the
# lineinfile task below only fires once.
- name: Check OpenSSL SANs (SubjectAltName) entry for CA
  shell: grep "{{ ansible_default_ipv4.address }}" /etc/pki/tls/openssl.cnf | wc -l
  ignore_errors: true
  register: subjectAltName_exists
- name: Add OpenSSL SANs (SubjectAltName) entry for CA
  lineinfile:
    dest: /etc/pki/tls/openssl.cnf
    line: 'subjectAltName = "{{ ansible_default_ipv4.address }}"'
    regexp: '^ Extensions for a typical CA'
    insertbefore: '# Extensions for a typical CA'
    backup: yes
  # /etc/pki/tls/openssl.cnf is root-owned; without become this task fails
  # with a permission error on a stock system (every sibling write task
  # in this playbook already escalates)
  become: true
  when: subjectAltName_exists.stdout|int == 0
# note: we can't currently use the Ansible uri module here, curl is a workaround
# https://github.com/ansible/ansible-modules-core/issues/265
# http://stackoverflow.com/questions/28997007/translate-curl-put-into-ansible-uri-module
- name: Load filebeat JSON index template
  command: curl -XPOST 'http://localhost:9200/_template/filebeat?pretty' -d@/tmp/filebeat-index-template.json
  ignore_errors: true
  become: true
# Restart so logstash picks up the conf.d filters deployed above; use the
# service module instead of shelling out to systemctl.
- name: Refresh logstash service
  service: name=logstash state=restarted
  ignore_errors: true
  become: true
- name: Setup logstash service
  service: name=logstash state=started enabled=true
  become: true
# we need TCP/80 and TCP/8080 open
# determine firewall status and take action
# 1) use firewall-cmd if firewalld is utilized
# 2) insert iptables rule if iptables is used
# Firewalld
# rc == 0 from these probes means "firewalld enabled/active"; the egrep -q
# forms produce no output, only an exit code for the register.
- name: Determine if firewalld is in use
  shell: systemctl is-enabled firewalld.service | egrep -qv 'masked|disabled'
  ignore_errors: true
  register: firewalld_in_use
- name: Determine if firewalld is active
  shell: systemctl is-active firewalld.service | grep -vq inactive
  ignore_errors: true
  register: firewalld_is_active
# rc == 0 below means the 5044/tcp rule already exists and is skipped later.
# NOTE(review): querying firewall-cmd may itself require root on some
# systems -- confirm whether this check needs become: true
- name: Determine if TCP/5044 is already active
  shell: firewall-cmd --list-ports | egrep -q "^5044/tcp"
  ignore_errors: true
  register: firewalld_tcp5044_exists
# add firewall rule via firewall-cmd
# TCP/5044 is the Beats input port that filebeat clients connect to.
- name: Add firewall rule for TCP/5044 (firewalld)
  command: "{{ item }}"
  with_items:
    - firewall-cmd --zone=public --add-port=5044/tcp --permanent
    - firewall-cmd --reload
  ignore_errors: true
  become: true
  when: firewalld_in_use.rc == 0 and firewalld_is_active.rc == 0 and firewalld_tcp5044_exists.rc != 0
# iptables-services
# Fallback path when firewalld is not in use: edit the static rules file
# and restart the iptables service.
- name: check firewall rules for TCP/5044 (iptables-services)
  shell: grep "dport 5044 \-j ACCEPT" /etc/sysconfig/iptables | wc -l
  ignore_errors: true
  register: iptables_tcp5044_exists
  # compare the command's return code (127 = command not found), not the
  # whole register dict, which never equals 127
  failed_when: iptables_tcp5044_exists.rc == 127
- name: Add firewall rule for TCP/5044 (iptables-services)
  lineinfile:
    dest: /etc/sysconfig/iptables
    line: '-A INPUT -p tcp -m tcp --dport 5044 -j ACCEPT'
    regexp: '^INPUT -i lo -j ACCEPT'
    insertbefore: '-A INPUT -i lo -j ACCEPT'
    backup: yes
  # /etc/sysconfig/iptables is root-owned
  become: true
  when: firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0 and iptables_tcp5044_exists.stdout|int == 0
  register: iptables_needs_restart
- name: Restart iptables-services for TCP/5044 (iptables-services)
  # use the service module, and only restart when the rule was actually
  # inserted above (a register dict compared to 0 is always "changed")
  service: name=iptables state=restarted
  ignore_errors: true
  become: true
  when: iptables_needs_restart.changed and firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0

View File

@ -0,0 +1,27 @@
# OpenSSL request configuration (rendered by Ansible) used to generate the
# ELK server SSL certificate that filebeat clients validate.
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
# non-interactive: take all DN fields from this file
prompt = no
[req_distinguished_name]
C = TG
ST = Togo
L = Lome
O = Private company
# wildcard CN; actual host matching relies on the SAN entries below
CN = *
[v3_req]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
basicConstraints = CA:TRUE
subjectAltName = @alt_names
[alt_names]
# wildcard DNS entries at every nesting depth so the certificate matches
# any hostname; the IP entry covers clients that connect by address
# (environments without DNS services)
DNS.1 = *
DNS.2 = *.*
DNS.3 = *.*.*
DNS.4 = *.*.*.*
DNS.5 = *.*.*.*.*
DNS.6 = *.*.*.*.*.*
DNS.7 = *.*.*.*.*.*.*
# the ELK server's primary IPv4 address, substituted by Ansible
IP.1 = {{ ansible_default_ipv4.address }}

View File

@ -0,0 +1,55 @@
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 8080 default_server;
listen [::]:8080 default_server;
server_name _;
root /usr/share/nginx/html;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
location / {
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
}
}

View File

@ -0,0 +1,164 @@
---
#
# Install/run nginx for browbeat
#
# EPEL provides nginx on RHEL7; import the signing key first so the
# remote release rpm verifies. Both operations need root.
- name: Import EPEL GPG Key
  rpm_key:
    key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
    state: present
  become: true
- name: Check for EPEL repo
  yum:
    name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
    state: present
  become: true
# python-httplib2 is required by the uri module; libsemanage-python by
# the seboolean module used below.
- name: Install nginx, httpd-tools, httplib2, libsemanage-python
  yum:
    name: "{{ item }}"
    state: present
  become: true
  with_items:
    - nginx
    - httpd-tools
    - python-httplib2
    - libsemanage-python
# SELinux boolean for nginx
# Allow nginx to open outbound network connections (the reverse proxy to
# kibana on localhost:5601). Changing SELinux booleans requires root.
- name: Apply SELinux boolean httpd_can_network_connect
  seboolean:
    name: httpd_can_network_connect
    state: true
    persistent: true
  become: true
# deploy kibana.conf with FQDN
- name: Setup nginx reverse proxy for kibana
  template:
    src: kibana.conf.j2
    dest: /etc/nginx/conf.d/kibana.conf
    owner: root
    group: root
    mode: "0644"
  become: true
  register: nginx_needs_restart
# deploy basic nginx.conf 8080 vhost
- name: Setup nginx TCP/8080 vhost for SSL certificate
  copy:
    src: nginx.conf
    dest: /etc/nginx/nginx.conf
    owner: root
    group: root
    mode: "0644"
  ignore_errors: true
  become: true
# start nginx service
# The old guard (nginx_needs_restart != 0) compared a register dict to an
# int and was always true, so nginx was started on every run; the service
# module is idempotent, so run it unconditionally and drop the broken guard.
- name: Start nginx service
  service: name=nginx state=started
  ignore_errors: true
  become: true
- name: Set nginx to start on boot
  service: name=nginx enabled=true
  ignore_errors: true
  become: true
# we need TCP/80 and TCP/8080 open
# determine firewall status and take action
# 1) use firewall-cmd if firewalld is utilized
# 2) insert iptables rule if iptables is used
# Firewalld
- name: Determine if firewalld is in use
  shell: systemctl is-enabled firewalld.service | egrep -qv 'masked|disabled'
  ignore_errors: true
  register: firewalld_in_use
- name: Determine if firewalld is active
  shell: systemctl is-active firewalld.service | grep -vq inactive
  ignore_errors: true
  register: firewalld_is_active
# rc == 0 means the 80/tcp rule already exists and the add task is skipped.
- name: Determine if TCP/80 is already active
  shell: firewall-cmd --list-ports | egrep -q "^80/tcp"
  ignore_errors: true
  register: firewalld_tcp80_exists
# add firewall rule via firewall-cmd
- name: Add firewall rule for TCP/80 (firewalld)
  command: "{{ item }}"
  with_items:
    - firewall-cmd --zone=public --add-port=80/tcp --permanent
    - firewall-cmd --reload
  ignore_errors: true
  become: true
  when: firewalld_in_use.rc == 0 and firewalld_is_active.rc == 0 and firewalld_tcp80_exists.rc != 0
# iptables-services
- name: check firewall rules for TCP/80 (iptables-services)
  shell: grep "dport 80 \-j ACCEPT" /etc/sysconfig/iptables | wc -l
  ignore_errors: true
  register: iptables_tcp80_exists
  # compare the command's return code (127 = command not found), not the
  # whole register dict, which never equals 127
  failed_when: iptables_tcp80_exists.rc == 127
- name: Add firewall rule for TCP/80 (iptables-services)
  lineinfile:
    dest: /etc/sysconfig/iptables
    line: '-A INPUT -p tcp -m tcp --dport 80 -j ACCEPT'
    regexp: '^INPUT -i lo -j ACCEPT'
    insertbefore: '-A INPUT -i lo -j ACCEPT'
    backup: yes
  # /etc/sysconfig/iptables is root-owned
  become: true
  when: firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0 and iptables_tcp80_exists.stdout|int == 0
  register: iptables_needs_restart
- name: Restart iptables-services for TCP/80 (iptables-services)
  # restart only when the rule was actually inserted above
  service: name=iptables state=restarted
  ignore_errors: true
  become: true
  when: iptables_needs_restart.changed and firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0
# Firewalld
# NOTE(review): these two detection tasks duplicate the ones in the TCP/80
# stanza above; harmless (same registers, same results) but they could be
# collapsed in a follow-up.
- name: Determine if firewalld is in use
  shell: systemctl is-enabled firewalld.service | egrep -qv 'masked|disabled'
  ignore_errors: true
  register: firewalld_in_use
- name: Determine if firewalld is active
  shell: systemctl is-active firewalld.service | grep -vq inactive
  ignore_errors: true
  register: firewalld_is_active
- name: Determine if TCP/8080 is already active
  shell: firewall-cmd --list-ports | egrep -q "^8080/tcp"
  ignore_errors: true
  register: firewalld_tcp8080_exists
# add firewall rule via firewall-cmd
- name: Add firewall rule for TCP/8080 (firewalld)
  command: "{{ item }}"
  with_items:
    - firewall-cmd --zone=public --add-port=8080/tcp --permanent
    - firewall-cmd --reload
  ignore_errors: true
  become: true
  when: firewalld_in_use.rc == 0 and firewalld_is_active.rc == 0 and firewalld_tcp8080_exists.rc != 0
# iptables-services
- name: check firewall rules for TCP/8080 (iptables-services)
  shell: grep "dport 8080 \-j ACCEPT" /etc/sysconfig/iptables | wc -l
  ignore_errors: true
  register: iptables_tcp8080_exists
  # compare the command's return code (127 = command not found), not the
  # whole register dict, which never equals 127
  failed_when: iptables_tcp8080_exists.rc == 127
- name: Add firewall rule for TCP/8080 (iptables-services)
  lineinfile:
    dest: /etc/sysconfig/iptables
    line: '-A INPUT -p tcp -m tcp --dport 8080 -j ACCEPT'
    regexp: '^INPUT -i lo -j ACCEPT'
    insertbefore: '-A INPUT -i lo -j ACCEPT'
    backup: yes
  # /etc/sysconfig/iptables is root-owned
  become: true
  when: firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0 and iptables_tcp8080_exists.stdout|int == 0
  register: iptables_needs_restart
- name: Restart iptables-services for TCP/8080 (iptables-services)
  # restart only when the rule was actually inserted above
  service: name=iptables state=restarted
  ignore_errors: true
  become: true
  when: iptables_needs_restart.changed and firewalld_in_use.rc != 0 and firewalld_is_active.rc != 0
# Leave EPEL installed (nginx came from it) but disabled so subsequent
# OpenStack package operations cannot pull unexpected packages from it.
- name: Disable EPEL Repo
  ini_file:
    dest: /etc/yum.repos.d/epel.repo
    section: epel
    option: enabled
    value: 0
  # /etc/yum.repos.d is root-owned; every other yum/repo task in this
  # playbook escalates, this one must as well
  become: true

View File

@ -0,0 +1,17 @@
# nginx reverse proxy fronting Kibana (listening on localhost:5601) with
# HTTP basic auth; server_name is rendered per-host by Ansible.
server {
    listen 80;
    server_name {{ansible_hostname}};

    # NOTE(review): htpasswd.users is presumably created elsewhere in the
    # role (httpd-tools is installed) -- verify it exists before first use
    auth_basic "Restricted Access";
    auth_basic_user_file /etc/nginx/htpasswd.users;

    location / {
        proxy_pass http://localhost:5601;
        # upgrade/connection headers enable websocket pass-through for the UI
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}