This adds the ability to substitute fluentd for logstash in ELK stack deployments. The substitution is seamless, and logstash remains the default if no options are changed. Rsyslog is used instead of the official Filebeat logstash forwarder. To use fluentd instead of logstash, change the following parameter in install/group_vars/all.yml:

    logging_backend: fluentd

In addition, the following enhancements were made:

* All service ports are now configurable
* Firewall ports are now configurable

Documentation updates can be viewed here:
http://rst.ninjs.org/?n=aa3ea354e28f4ef11d2a03344d8c34be&theme=basic

Patchset #2: clean up commit message character length.
Patchset #3: explicitly state logstash is the default.
Patchset #4: clean up browbeat install instructions.
Patchset #5: add /etc/rsyslog.d/openstack-logs.conf, which picks up common OpenStack logs in /var/log/*.

Change-Id: Ife928c1f6699e0c675d44e857ccb6aaff165752d
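The tasks below reference a handful of variables from install/group_vars/all.yml. As a rough sketch of how a fluentd deployment might be configured there (the port values are illustrative assumptions, not the project's actual defaults):

    # Logging backend for the ELK stack: logstash (default) or fluentd
    logging_backend: fluentd

    # Service ports are configurable; the values below are example placeholders
    nginx_kibana_port: 80             # assumed example value
    elk_server_ssl_cert_port: 8080    # assumed example value
    fluentd_syslog_port: 42185        # assumed example value

The updated kibana role tasks (YAML) follow.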
---
#
# Install/run kibana for browbeat
#

- name: Copy kibana yum repo file
  copy:
    src=kibana.repo
    dest=/etc/yum.repos.d/kibana.repo
    owner=root
    group=root
    mode=0644
  become: true
# We need to insert data to create an initial index, query if it exists
- name: Check elasticsearch index for content
  uri:
    url=http://localhost:9200/_cat/indices
    method=GET
    return_content=yes
  register: elasticsearch_index

# Populate elasticsearch with local logs if using logstash
- name: Populate elasticsearch index with local logs via logstash
  shell: cat /var/log/messages | /opt/logstash/bin/logstash -f /etc/logstash/conf.d/10-syslog.conf
  when: "'logstash-' not in elasticsearch_index.content"
  ignore_errors: true

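# When fluentd is the logging backend, install a local rsyslog and forward
# syslog to fluentd so the initial index gets populated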
- name: Install local rsyslogd for fluentd
  yum: name={{ item }} state=present
  become: true
  with_items:
    - rsyslog
  when: (logging_backend == 'fluentd')

- name: Setup local rsyslogd for fluentd
  lineinfile: dest=/etc/rsyslog.conf line="*.* @localhost:{{ fluentd_syslog_port }}"
  become: true
  when: (logging_backend == 'fluentd')
  register: rsyslog_updated

- name: Populate elasticsearch index with local logs via fluentd
  command: systemctl restart rsyslog.service
  become: true
  ignore_errors: true
  when: rsyslog_updated.changed

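# Install kibana and load the sample filebeat dashboards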
- name: Install kibana rpms
  yum: name={{ item }} state=present
  become: true
  with_items:
    - kibana
    - unzip

- name: Check kibana filebeat dashboards
  stat: path=/tmp/filebeat-dashboards.zip
  ignore_errors: true
  register: kibana_dashboards_present

- name: Copy kibana filebeat dashboards
  copy:
    src=filebeat-dashboards.zip
    dest=/tmp/filebeat-dashboards.zip
    owner=root
    group=root
    mode=0644
  become: true
  ignore_errors: true
  when: not kibana_dashboards_present.stat.exists

- name: Install kibana filebeat dashboards
  unarchive: src=/tmp/filebeat-dashboards.zip dest=/tmp/ copy=no
  ignore_errors: true
  when: not kibana_dashboards_present.stat.exists

- name: Configure kibana filebeat dashboards
  shell: sh /tmp/beats-dashboards-master/load.sh -url "http://localhost:9200" -user "admin:admin"
  ignore_errors: true

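# Create the default kibana admin user (admin/admin) in the nginx htpasswd
# file if it does not already exist, then start the kibana service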
- name: Check kibana users
  stat: path=/etc/nginx/htpasswd.users
  ignore_errors: true
  register: kibana_user_pwfile_exists

- name: Create kibana admin user
  command: htpasswd -b -c /etc/nginx/htpasswd.users admin admin
  become: true
  ignore_errors: true
  when: not kibana_user_pwfile_exists.stat.exists

- name: Setup kibana service
  service: name=kibana state=started enabled=true
  become: true

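# Generate a self-signed SSL certificate for Filebeat forwarders and publish
# it through nginx so clients can download it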
- name: Check Filebeat forwarder SSL certificate
  stat: path=/etc/pki/tls/certs/filebeat-forwarder.crt
  ignore_errors: true
  register: filebeat_forwarder_ssl_exists

- name: Create client forwarder SSL certificate
  command: >
    openssl req -subj '/CN={{ ansible_hostname }}/' -config /etc/pki/tls/openssl_extras.cnf
    -x509 -days 3650 -batch -nodes -newkey rsa:2048
    -keyout /etc/pki/tls/private/filebeat-forwarder.key
    -out /etc/pki/tls/certs/filebeat-forwarder.crt
  become: true
  ignore_errors: true
  when: not filebeat_forwarder_ssl_exists.stat.exists

- name: Check Filebeat forwarder SSL certificate copy
  stat: path=/usr/share/nginx/html/filebeat-forwarder.crt
  ignore_errors: true
  register: filebeat_forwarder_ssl_client_copy_exists

- name: Copy Filebeat forwarder SSL certificate
  command: cp /etc/pki/tls/certs/filebeat-forwarder.crt /usr/share/nginx/html/filebeat-forwarder.crt
  become: true
  ignore_errors: true
  when: not filebeat_forwarder_ssl_client_copy_exists.stat.exists

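# Restart the active logging backend so the new configuration takes effect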
- name: Refresh logstash service
  command: systemctl restart logstash.service
  ignore_errors: true
  when: (logging_backend != 'fluentd')
  become: true

- name: Refresh fluentd service
  command: systemctl restart td-agent.service
  when: (logging_backend == 'fluentd')
  become: true

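# Print post-setup information and client setup instructions for the operator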
- name: Print SSL post-setup information
  debug: msg="Filebeat SSL Certificate available at http://{{ ansible_hostname }}:{{ elk_server_ssl_cert_port }}/filebeat-forwarder.crt"
  when: (logging_backend != 'fluentd')

- name: Print post-setup URL
  debug: msg="*** ELK Services available at http://{{ ansible_hostname }}:{{ nginx_kibana_port }} ***"

- name: Print index creation instructions
  debug: msg="*** 1) Navigate to http://{{ ansible_hostname }}:{{ nginx_kibana_port }}, log in with admin/admin, and click 'Create' on the green index button ***"

- name: Print filebeat openstack client setup instructions
  debug: msg="*** 2) Run ansible-playbook -i hosts install/elk-openstack-client.yml --extra-vars 'elk_server={{ ansible_default_ipv4.address }}' to set up OpenStack clients ***"

- name: Print filebeat client setup instructions
  debug: msg="*** 2) Run ansible-playbook -i hosts install/elk-client.yml --extra-vars 'elk_server={{ ansible_default_ipv4.address }}' to set up clients ***"