Add support for elasticsearch cluster.

We need to expand our elasticsearch install base. Update puppet to make
this possible.

Change-Id: Id0dae839b12ebf47715cf40a363832e0f661a94f
Reviewed-on: https://review.openstack.org/33910
Reviewed-by: Jeremy Stanley <fungi@yuggoth.org>
Reviewed-by: James E. Blair <corvus@inaugust.com>
Approved: Clark Boylan <clark.boylan@gmail.com>
Tested-by: Jenkins
commit 33367c88de (parent a60d8d50ee)
@@ -201,32 +201,48 @@ node 'puppet-dashboard.openstack.org' {
 
 node 'logstash.openstack.org' {
   class { 'openstack_project::logstash':
-    sysadmins             => hiera('sysadmins'),
-    elasticsearch_masters => ['elasticsearch.openstack.org'],
-    gearman_workers       => [
+    sysadmins           => hiera('sysadmins'),
+    elasticsearch_nodes => [
+      'elasticsearch.openstack.org',
+      'elasticsearch2.openstack.org',
+      'elasticsearch3.openstack.org',
+    ],
+    gearman_workers     => [
       'logstash-worker1.openstack.org',
       'logstash-worker2.openstack.org',
       'logstash-worker3.openstack.org',
     ],
+    discover_node       => 'elasticsearch.openstack.org',
   }
 }
 
 node /^logstash-worker\d+\.openstack\.org$/ {
   class { 'openstack_project::logstash_worker':
-    sysadmins             => hiera('sysadmins'),
-    elasticsearch_masters => ['elasticsearch.openstack.org'],
+    sysadmins           => hiera('sysadmins'),
+    elasticsearch_nodes => [
+      'elasticsearch.openstack.org',
+      'elasticsearch2.openstack.org',
+      'elasticsearch3.openstack.org',
+    ],
+    discover_node       => 'elasticsearch.openstack.org',
   }
 }
 
-node 'elasticsearch.openstack.org' {
+node /^elasticsearch\d*\.openstack\.org$/ {
   class { 'openstack_project::elasticsearch':
-    sysadmins        => hiera('sysadmins'),
-    logstash_workers => [
+    sysadmins             => hiera('sysadmins'),
+    elasticsearch_nodes   => [
+      'elasticsearch.openstack.org',
+      'elasticsearch2.openstack.org',
+      'elasticsearch3.openstack.org',
+    ],
+    elasticsearch_clients => [
       'logstash.openstack.org',
       'logstash-worker1.openstack.org',
       'logstash-worker2.openstack.org',
       'logstash-worker3.openstack.org',
     ],
+    discover_node         => 'elasticsearch.openstack.org',
   }
 }
 
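The same three-host list is repeated in each of the three node blocks above. As a rough sketch only (hypothetical, not part of this commit): because the new regex node definition already matches any host named elasticsearchN.openstack.org, growing the cluster mostly means extending that list, which could be factored into a top-scope variable in site.pp.

# Hypothetical sketch, not part of this change: one shared host list,
# reused by the elasticsearch node block (and likewise by the logstash
# and logstash-worker blocks).
$elasticsearch_nodes = [
  'elasticsearch.openstack.org',
  'elasticsearch2.openstack.org',
  'elasticsearch3.openstack.org',
  'elasticsearch4.openstack.org',  # hypothetical additional data node
]

node /^elasticsearch\d*\.openstack\.org$/ {
  class { 'openstack_project::elasticsearch':
    sysadmins             => hiera('sysadmins'),
    elasticsearch_nodes   => $elasticsearch_nodes,
    elasticsearch_clients => [
      'logstash.openstack.org',
      'logstash-worker1.openstack.org',
      'logstash-worker2.openstack.org',
      'logstash-worker3.openstack.org',
    ],
    discover_node         => 'elasticsearch.openstack.org',
  }
}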
@@ -15,7 +15,7 @@
 # Class to install kibana frontend to logstash.
 #
 class kibana (
-  $elasticsearch_host = 'localhost'
+  $discover_node = 'localhost'
 ) {
 
   group { 'kibana':
@@ -7,7 +7,7 @@ module KibanaConfig
   # Your elastic search server(s). This may be set as an array for round robin
   # load balancing
   # Elasticsearch = ["elasticsearch1:9200","elasticsearch2:9200"]
-  Elasticsearch = "<%= scope.lookupvar("::kibana::elasticsearch_host") %>:9200"
+  Elasticsearch = "<%= scope.lookupvar("::kibana::discover_node") %>:9200"
 
   #Set the Net::HTTP read/open timeouts for the connection to the ES backend
   ElasticsearchTimeout = 500
@@ -14,7 +14,9 @@
 #
 # Class to install elasticsearch.
 #
-class logstash::elasticsearch {
+class logstash::elasticsearch (
+  $discover_node = 'localhost'
+) {
   # install java runtime
   package { 'java7-runtime-headless':
     ensure => present,
@@ -40,7 +42,7 @@ class logstash::elasticsearch {
 
   file { '/etc/elasticsearch/elasticsearch.yml':
     ensure  => present,
-    source  => 'puppet:///modules/logstash/elasticsearch.yml',
+    content => template('logstash/elasticsearch.yml.erb'),
     replace => true,
     owner   => 'root',
     group   => 'root',
@@ -18,7 +18,7 @@ class logstash::web (
   $vhost_name = $::fqdn,
   $serveradmin = "webmaster@${::fqdn}",
   $frontend = 'internal',
-  $elasticsearch_host = 'localhost',
+  $discover_node = 'localhost',
   $proxy_elasticsearch = false
 ) {
   include apache
@@ -57,7 +57,7 @@ class logstash::web (
 
     'kibana': {
       class { 'kibana':
-        elasticsearch_host => $elasticsearch_host,
+        discover_node => $discover_node,
       }
       $vhost = 'logstash/kibana.vhost.erb'
     }
@@ -37,7 +37,7 @@
 # Node names are generated dynamically on startup, so you're relieved
 # from configuring them manually. You can tie this node to a specific name:
 #
-# node.name: "Franz Kafka"
+node.name: "<%= scope.lookupvar("::hostname") %>"
 
 # Every node can be configured to allow or deny being eligible as the master,
 # and to allow or deny to store the data.
@@ -139,7 +139,7 @@
 index.store.compress.stored: true
 index.store.compress.tv: true
 
-indices.memory.index_buffer_size: "40%"
+indices.memory.index_buffer_size: "33%"
 
 #################################### Paths ####################################
 
@@ -184,7 +184,7 @@ indices.memory.index_buffer_size: "40%"
 #
 # Set this property to true to lock the memory:
 #
-# bootstrap.mlockall: true
+bootstrap.mlockall: true
 
 # Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
 # to the same value, and that the machine has enough memory to allocate
@@ -327,7 +327,7 @@ discovery.zen.ping.multicast.enabled: false
 # to perform discovery when new nodes (master or data) are started:
 #
 # discovery.zen.ping.unicast.hosts: ["host1", "host2:port", "host3[portX-portY]"]
-discovery.zen.ping.unicast.hosts: ["localhost"]
+discovery.zen.ping.unicast.hosts: ["<%= discover_node %>"]
 
 # EC2 discovery allows to use AWS EC2 API in order to perform discovery.
 #
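A minimal rendering sketch (hypothetical, not part of this change; it only assumes Puppet's built-in inline_template function and the standard hostname fact) showing what the two templated lines above evaluate to on a cluster member such as elasticsearch2.openstack.org:

# Hypothetical illustration of the ERB evaluation, run on elasticsearch2
# with $discover_node wired in from the glue class as in this commit.
$discover_node = 'elasticsearch.openstack.org'
notice(inline_template('node.name: "<%= scope.lookupvar("::hostname") %>"'))
# -> node.name: "elasticsearch2"
notice(inline_template('discovery.zen.ping.unicast.hosts: ["<%= scope.lookupvar("discover_node") %>"]'))
# -> discovery.zen.ping.unicast.hosts: ["elasticsearch.openstack.org"]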
@@ -11,9 +11,9 @@
   <% if proxy_elasticsearch == true %>
   # Proxy for elasticsearch _aliases, .*/_status, and .*/_search.
   <LocationMatch "^/elasticsearch/(_aliases|.*/_status|.*/_search)$">
-    ProxyPassMatch http://<%= scope.lookupvar("::logstash::web::elasticsearch_host") %>:9200/$1
+    ProxyPassMatch http://<%= scope.lookupvar("::logstash::web::discover_node") %>:9200/$1
   </LocationMatch>
-  ProxyPassReverse /elasticsearch/ http://<%= scope.lookupvar("::logstash::web::elasticsearch_host") %>:9200/
+  ProxyPassReverse /elasticsearch/ http://<%= scope.lookupvar("::logstash::web::discover_node") %>:9200/
   <% end %>
 
   ProxyPass / http://127.0.0.1:5601/ retry=0
@@ -15,10 +15,14 @@
 # Elasticsearch server glue class.
 #
 class openstack_project::elasticsearch (
-  $logstash_workers = [],
+  $elasticsearch_nodes = [],
+  $elasticsearch_clients = [],
+  $discover_node = 'localhost',
   $sysadmins = []
 ) {
-  $iptables_rule = regsubst ($logstash_workers, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
+  $iptables_nodes_rule = regsubst ($elasticsearch_nodes, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
+  $iptables_clients_rule = regsubst ($elasticsearch_clients, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
+  $iptables_rule = flatten([$iptables_nodes_rule, $iptables_clients_rule])
   class { 'openstack_project::server':
     iptables_public_tcp_ports => [22],
     iptables_rules6           => $iptables_rule,
@@ -26,7 +30,9 @@ class openstack_project::elasticsearch (
     sysadmins                 => $sysadmins,
   }
 
-  include logstash::elasticsearch
+  class { 'logstash::elasticsearch':
+    discover_node => $discover_node,
+  }
 
   cron { 'delete_old_es_indices':
     user => 'root',
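The firewall handling above relies on regsubst() mapping over arrays. A minimal worked example (hypothetical host values, not part of this change) of the rule strings that result before they are flattened and handed to openstack_project::server:

# Hypothetical illustration only: regsubst() applied to an array returns an
# array, substituting each element into the replacement, so every node and
# client host gets its own ACCEPT rule for the 9200-9400 port range.
$nodes = ['elasticsearch.openstack.org', 'elasticsearch2.openstack.org']
$rules = regsubst($nodes, '^(.*)$',
  '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
# $rules ==
# ['-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s elasticsearch.openstack.org -j ACCEPT',
#  '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s elasticsearch2.openstack.org -j ACCEPT']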
@@ -15,11 +15,12 @@
 # Logstash web frontend glue class.
 #
 class openstack_project::logstash (
-  $elasticsearch_masters = [],
+  $elasticsearch_nodes = [],
   $gearman_workers = [],
+  $discover_node = 'elasticsearch.openstack.org',
   $sysadmins = []
 ) {
-  $iptables_es_rule = regsubst ($elasticsearch_masters, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
+  $iptables_es_rule = regsubst ($elasticsearch_nodes, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
   $iptables_gm_rule = regsubst ($gearman_workers, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 4730 -s \1 -j ACCEPT')
   $iptables_rule = flatten([$iptables_es_rule, $iptables_gm_rule])
   class { 'openstack_project::server':
@@ -31,7 +32,7 @@ class openstack_project::logstash (
 
   class { 'logstash::web':
     frontend            => 'kibana',
-    elasticsearch_host  => 'elasticsearch.openstack.org',
+    discover_node       => $discover_node,
     proxy_elasticsearch => true,
   }
 
@@ -15,10 +15,11 @@
 # Logstash indexer worker glue class.
 #
 class openstack_project::logstash_worker (
-  $elasticsearch_masters = [],
+  $elasticsearch_nodes = [],
+  $discover_node = 'elasticsearch.openstack.org',
   $sysadmins = []
 ) {
-  $iptables_rule = regsubst ($elasticsearch_masters, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
+  $iptables_rule = regsubst ($elasticsearch_nodes, '^(.*)$', '-m state --state NEW -m tcp -p tcp --dport 9200:9400 -s \1 -j ACCEPT')
   class { 'openstack_project::server':
     iptables_public_tcp_ports => [22],
     iptables_rules6           => $iptables_rule,
@@ -127,7 +127,7 @@ filter {
 
 output {
   elasticsearch {
-    host => "elasticsearch.openstack.org"
+    host => "<%= scope.lookupvar("::openstack_project::logstash_worker::discover_node") %>"
     node_name => "<%= scope.lookupvar("::hostname") %>"
     max_inflight_requests => 512
   }