Add support for elasticsearch cluster.
We need to expand our elasticsearch install base. Update puppet to make
this possible.

Change-Id: Id0dae839b12ebf47715cf40a363832e0f661a94f
Reviewed-on: https://review.openstack.org/33910
Reviewed-by: Jeremy Stanley <fungi@yuggoth.org>
Reviewed-by: James E. Blair <corvus@inaugust.com>
Approved: Clark Boylan <clark.boylan@gmail.com>
Tested-by: Jenkins
This commit is contained in:
parent
60934a5cdd
commit
6c33ba3b70
|
@ -14,7 +14,9 @@
|
|||
#
|
||||
# Class to install elasticsearch.
|
||||
#
|
||||
class logstash::elasticsearch {
|
||||
class logstash::elasticsearch (
|
||||
discover_node = 'localhost'
|
||||
) {
|
||||
# install java runtime
|
||||
package { 'java7-runtime-headless':
|
||||
ensure => present,
|
||||
|
@ -40,7 +42,7 @@ class logstash::elasticsearch {
|
|||
|
||||
file { '/etc/elasticsearch/elasticsearch.yml':
|
||||
ensure => present,
|
||||
source => 'puppet:///modules/logstash/elasticsearch.yml',
|
||||
content => template('logstash/elasticsearch.yml.erb'),
|
||||
replace => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
|
|
|
@ -18,7 +18,7 @@ class logstash::web (
|
|||
$vhost_name = $::fqdn,
|
||||
$serveradmin = "webmaster@${::fqdn}",
|
||||
$frontend = 'internal',
|
||||
$elasticsearch_host = 'localhost',
|
||||
$discover_node = 'localhost',
|
||||
$proxy_elasticsearch = false
|
||||
) {
|
||||
include apache
|
||||
|
@ -57,7 +57,7 @@ class logstash::web (
|
|||
|
||||
'kibana': {
|
||||
class { 'kibana':
|
||||
elasticsearch_host => $elasticsearch_host,
|
||||
discover_node => $discover_node,
|
||||
}
|
||||
$vhost = 'logstash/kibana.vhost.erb'
|
||||
}
|
||||
|
|
|
@ -37,7 +37,7 @@
|
|||
# Node names are generated dynamically on startup, so you're relieved
|
||||
# from configuring them manually. You can tie this node to a specific name:
|
||||
#
|
||||
# node.name: "Franz Kafka"
|
||||
node.name: "<%= scope.lookupvar("::hostname") %>"
|
||||
|
||||
# Every node can be configured to allow or deny being eligible as the master,
|
||||
# and to allow or deny to store the data.
|
||||
|
@ -139,7 +139,7 @@
|
|||
index.store.compress.stored: true
|
||||
index.store.compress.tv: true
|
||||
|
||||
indices.memory.index_buffer_size: "40%"
|
||||
indices.memory.index_buffer_size: "33%"
|
||||
|
||||
#################################### Paths ####################################
|
||||
|
||||
|
@ -184,7 +184,7 @@ indices.memory.index_buffer_size: "40%"
|
|||
#
|
||||
# Set this property to true to lock the memory:
|
||||
#
|
||||
# bootstrap.mlockall: true
|
||||
bootstrap.mlockall: true
|
||||
|
||||
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
|
||||
# to the same value, and that the machine has enough memory to allocate
|
||||
|
@ -327,7 +327,7 @@ discovery.zen.ping.multicast.enabled: false
|
|||
# to perform discovery when new nodes (master or data) are started:
|
||||
#
|
||||
# discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"]
|
||||
discovery.zen.ping.unicast.hosts: ["localhost"]
|
||||
discovery.zen.ping.unicast.hosts: ["<%= discover_node %>"]
|
||||
|
||||
# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
|
||||
#
|
|
@ -11,9 +11,9 @@
|
|||
<% if proxy_elasticsearch == true %>
|
||||
# Proxy for elasticsearch _aliases, .*/_status, and .*/_search.
|
||||
<LocationMatch "^/elasticsearch/(_aliases|.*/_status|.*/_search)$">
|
||||
ProxyPassMatch http://<%= scope.lookupvar("::logstash::web::elasticsearch_host") %>:9200/$1
|
||||
ProxyPassMatch http://<%= scope.lookupvar("::logstash::web::discover_node") %>:9200/$1
|
||||
</LocationMatch>
|
||||
ProxyPassReverse /elasticsearch/ http://<%= scope.lookupvar("::logstash::web::elasticsearch_host") %>:9200/
|
||||
ProxyPassReverse /elasticsearch/ http://<%= scope.lookupvar("::logstash::web::discover_node") %>:9200/
|
||||
<% end %>
|
||||
|
||||
ProxyPass / http://127.0.0.1:5601/ retry=0
|
||||
|
|
Loading…
Reference in New Issue