Add support for elasticsearch cluster.

We need to expand our elasticsearch install base. Update puppet to make
this possible.

Change-Id: Id0dae839b12ebf47715cf40a363832e0f661a94f
Reviewed-on: https://review.openstack.org/33910
Reviewed-by: Jeremy Stanley <fungi@yuggoth.org>
Reviewed-by: James E. Blair <corvus@inaugust.com>
Approved: Clark Boylan <clark.boylan@gmail.com>
Tested-by: Jenkins
This commit is contained in:
Clark Boylan 2013-06-20 18:37:41 -07:00 committed by Jenkins
parent a60d8d50ee
commit 33367c88de
11 changed files with 55 additions and 29 deletions

View File

@ -201,32 +201,48 @@ node 'puppet-dashboard.openstack.org' {
node 'logstash.openstack.org' { node 'logstash.openstack.org' {
class { 'openstack_project::logstash': class { 'openstack_project::logstash':
sysadmins => hiera('sysadmins'), sysadmins => hiera('sysadmins'),
elasticsearch_masters => ['elasticsearch.openstack.org'], elasticsearch_nodes => [
gearman_workers => [ 'elasticsearch.openstack.org',
'elasticsearch2.openstack.org',
'elasticsearch3.openstack.org',
],
gearman_workers => [
'logstash-worker1.openstack.org', 'logstash-worker1.openstack.org',
'logstash-worker2.openstack.org', 'logstash-worker2.openstack.org',
'logstash-worker3.openstack.org', 'logstash-worker3.openstack.org',
], ],
discover_node => 'elasticsearch.openstack.org',
} }
} }
node /^logstash-worker\d+\.openstack\.org$/ { node /^logstash-worker\d+\.openstack\.org$/ {
class { 'openstack_project::logstash_worker': class { 'openstack_project::logstash_worker':
sysadmins => hiera('sysadmins'), sysadmins => hiera('sysadmins'),
elasticsearch_masters => ['elasticsearch.openstack.org'], elasticsearch_nodes => [
'elasticsearch.openstack.org',
'elasticsearch2.openstack.org',
'elasticsearch3.openstack.org',
],
discover_node => 'elasticsearch.openstack.org',
} }
} }
node 'elasticsearch.openstack.org' { node /^elasticsearch\d*\.openstack\.org$/ {
class { 'openstack_project::elasticsearch': class { 'openstack_project::elasticsearch':
sysadmins => hiera('sysadmins'), sysadmins => hiera('sysadmins'),
logstash_workers => [ elasticsearch_nodes => [
'elasticsearch.openstack.org',
'elasticsearch2.openstack.org',
'elasticsearch3.openstack.org',
],
elasticsearch_clients => [
'logstash.openstack.org', 'logstash.openstack.org',
'logstash-worker1.openstack.org', 'logstash-worker1.openstack.org',
'logstash-worker2.openstack.org', 'logstash-worker2.openstack.org',
'logstash-worker3.openstack.org', 'logstash-worker3.openstack.org',
], ],
discover_node => 'elasticsearch.openstack.org',
} }
} }

View File

@ -15,7 +15,7 @@
# Class to install kibana frontend to logstash. # Class to install kibana frontend to logstash.
# #
class kibana ( class kibana (
$elasticsearch_host = 'localhost' $discover_node = 'localhost'
) { ) {
group { 'kibana': group { 'kibana':

View File

@ -7,7 +7,7 @@ module KibanaConfig
# Your elastic search server(s). This may be set as an array for round robin # Your elastic search server(s). This may be set as an array for round robin
# load balancing # load balancing
# Elasticsearch = ["elasticsearch1:9200","elasticsearch2:9200"] # Elasticsearch = ["elasticsearch1:9200","elasticsearch2:9200"]
Elasticsearch = "<%= scope.lookupvar("::kibana::elasticsearch_host") %>:9200" Elasticsearch = "<%= scope.lookupvar("::kibana::discover_node") %>:9200"
#Set the Net::HTTP read/open timeouts for the connection to the ES backend #Set the Net::HTTP read/open timeouts for the connection to the ES backend
ElasticsearchTimeout = 500 ElasticsearchTimeout = 500

View File

@ -14,7 +14,9 @@
# #
# Class to install elasticsearch. # Class to install elasticsearch.
# #
class logstash::elasticsearch { class logstash::elasticsearch (
$discover_node = 'localhost'
) {
# install java runtime # install java runtime
package { 'java7-runtime-headless': package { 'java7-runtime-headless':
ensure => present, ensure => present,
@ -40,7 +42,7 @@ class logstash::elasticsearch {
file { '/etc/elasticsearch/elasticsearch.yml': file { '/etc/elasticsearch/elasticsearch.yml':
ensure => present, ensure => present,
source => 'puppet:///modules/logstash/elasticsearch.yml', content => template('logstash/elasticsearch.yml.erb'),
replace => true, replace => true,
owner => 'root', owner => 'root',
group => 'root', group => 'root',

View File

@ -18,7 +18,7 @@ class logstash::web (
$vhost_name = $::fqdn, $vhost_name = $::fqdn,
$serveradmin = "webmaster@${::fqdn}", $serveradmin = "webmaster@${::fqdn}",
$frontend = 'internal', $frontend = 'internal',
$elasticsearch_host = 'localhost', $discover_node = 'localhost',
$proxy_elasticsearch = false $proxy_elasticsearch = false
) { ) {
include apache include apache
@ -57,7 +57,7 @@ class logstash::web (
'kibana': { 'kibana': {
class { 'kibana': class { 'kibana':
elasticsearch_host => $elasticsearch_host, discover_node => $discover_node,
} }
$vhost = 'logstash/kibana.vhost.erb' $vhost = 'logstash/kibana.vhost.erb'
} }

View File

@ -37,7 +37,7 @@
# Node names are generated dynamically on startup, so you're relieved # Node names are generated dynamically on startup, so you're relieved
# from configuring them manually. You can tie this node to a specific name: # from configuring them manually. You can tie this node to a specific name:
# #
# node.name: "Franz Kafka" node.name: "<%= scope.lookupvar("::hostname") %>"
# Every node can be configured to allow or deny being eligible as the master, # Every node can be configured to allow or deny being eligible as the master,
# and to allow or deny to store the data. # and to allow or deny to store the data.
@ -139,7 +139,7 @@
index.store.compress.stored: true index.store.compress.stored: true
index.store.compress.tv: true index.store.compress.tv: true
indices.memory.index_buffer_size: "40%" indices.memory.index_buffer_size: "33%"
#################################### Paths #################################### #################################### Paths ####################################
@ -184,7 +184,7 @@ indices.memory.index_buffer_size: "40%"
# #
# Set this property to true to lock the memory: # Set this property to true to lock the memory:
# #
# bootstrap.mlockall: true bootstrap.mlockall: true
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set # Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
# to the same value, and that the machine has enough memory to allocate # to the same value, and that the machine has enough memory to allocate
@ -327,7 +327,7 @@ discovery.zen.ping.multicast.enabled: false
# to perform discovery when new nodes (master or data) are started: # to perform discovery when new nodes (master or data) are started:
# #
# discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"] # discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"]
discovery.zen.ping.unicast.hosts: ["localhost"] discovery.zen.ping.unicast.hosts: ["<%= discover_node %>"]
# EC2 discovery allows to use AWS EC2 API in order to perform discovery. # EC2 discovery allows to use AWS EC2 API in order to perform discovery.
# #

View File

@ -11,9 +11,9 @@
<% if proxy_elasticsearch == true %> <% if proxy_elasticsearch == true %>
# Proxy for elasticsearch _aliases, .*/_status, and .*/_search. # Proxy for elasticsearch _aliases, .*/_status, and .*/_search.
<LocationMatch "^/elasticsearch/(_aliases|.*/_status|.*/_search)$"> <LocationMatch "^/elasticsearch/(_aliases|.*/_status|.*/_search)$">
ProxyPassMatch http://<%= scope.lookupvar("::logstash::web::elasticsearch_host") %>:9200/$1 ProxyPassMatch http://<%= scope.lookupvar("::logstash::web::discover_node") %>:9200/$1
</LocationMatch> </LocationMatch>
ProxyPassReverse /elasticsearch/ http://<%= scope.lookupvar("::logstash::web::elasticsearch_host") %>:9200/ ProxyPassReverse /elasticsearch/ http://<%= scope.lookupvar("::logstash::web::discover_node") %>:9200/
<% end %> <% end %>
ProxyPass / http://127.0.0.1:5601/ retry=0 ProxyPass / http://127.0.0.1:5601/ retry=0

View File

@ -15,10 +15,14 @@
# Elasticsearch server glue class. # Elasticsearch server glue class.
# #
class openstack_project::elasticsearch ( class openstack_project::elasticsearch (
$logstash_workers = [], $elasticsearch_nodes = [],
$elasticsearch_clients = [],
$discover_node = 'localhost',
$sysadmins = [] $sysadmins = []
) { ) {
$iptables_rule = regsubst ($logstash_workers, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT') $iptables_nodes_rule = regsubst ($elasticsearch_nodes, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
$iptables_clients_rule = regsubst ($elasticsearch_clients, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
$iptables_rule = flatten([$iptables_nodes_rule, $iptables_clients_rule])
class { 'openstack_project::server': class { 'openstack_project::server':
iptables_public_tcp_ports => [22], iptables_public_tcp_ports => [22],
iptables_rules6 => $iptables_rule, iptables_rules6 => $iptables_rule,
@ -26,7 +30,9 @@ class openstack_project::elasticsearch (
sysadmins => $sysadmins, sysadmins => $sysadmins,
} }
include logstash::elasticsearch class { 'logstash::elasticsearch':
discover_node => $discover_node,
}
cron { 'delete_old_es_indices': cron { 'delete_old_es_indices':
user => 'root', user => 'root',

View File

@ -15,11 +15,12 @@
# Logstash web frontend glue class. # Logstash web frontend glue class.
# #
class openstack_project::logstash ( class openstack_project::logstash (
$elasticsearch_masters = [], $elasticsearch_nodes = [],
$gearman_workers = [], $gearman_workers = [],
$discover_node = 'elasticsearch.openstack.org',
$sysadmins = [] $sysadmins = []
) { ) {
$iptables_es_rule = regsubst ($elasticsearch_masters, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT') $iptables_es_rule = regsubst ($elasticsearch_nodes, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
$iptables_gm_rule = regsubst ($gearman_workers, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 4730 -s \1 -j ACCEPT') $iptables_gm_rule = regsubst ($gearman_workers, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 4730 -s \1 -j ACCEPT')
$iptables_rule = flatten([$iptables_es_rule, $iptables_gm_rule]) $iptables_rule = flatten([$iptables_es_rule, $iptables_gm_rule])
class { 'openstack_project::server': class { 'openstack_project::server':
@ -31,7 +32,7 @@ class openstack_project::logstash (
class { 'logstash::web': class { 'logstash::web':
frontend => 'kibana', frontend => 'kibana',
elasticsearch_host => 'elasticsearch.openstack.org', discover_node => $discover_node,
proxy_elasticsearch => true, proxy_elasticsearch => true,
} }

View File

@ -15,10 +15,11 @@
# Logstash indexer worker glue class. # Logstash indexer worker glue class.
# #
class openstack_project::logstash_worker ( class openstack_project::logstash_worker (
$elasticsearch_masters = [], $elasticsearch_nodes = [],
$discover_node = 'elasticsearch.openstack.org',
$sysadmins = [] $sysadmins = []
) { ) {
$iptables_rule = regsubst ($elasticsearch_masters, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT') $iptables_rule = regsubst ($elasticsearch_nodes, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
class { 'openstack_project::server': class { 'openstack_project::server':
iptables_public_tcp_ports => [22], iptables_public_tcp_ports => [22],
iptables_rules6 => $iptables_rule, iptables_rules6 => $iptables_rule,

View File

@ -127,7 +127,7 @@ filter {
output { output {
elasticsearch { elasticsearch {
host => "elasticsearch.openstack.org" host => "<%= scope.lookupvar("::openstack_project::logstash_worker::discover_node") %>"
node_name => "<%= scope.lookupvar("::hostname") %>" node_name => "<%= scope.lookupvar("::hostname") %>"
max_inflight_requests => 512 max_inflight_requests => 512
} }