Deploy Elasticsearch cluster

Configure a VIP and a Corosync/Pacemaker cluster. The plugin must override
the Hiera data to explicitly set the Corosync node list.

Configure Elasticsearch instances with parameters:

* cluster.name
* unicast discovery

Add firewall rule to allow clustering traffic.

Implements: elasticsearch-clustering

Change-Id: I0636e02113bfdacc776beb20c08cc88308486c29
This commit is contained in:
Swann Croiset 2015-12-14 16:00:14 +01:00
parent 6d85b05a90
commit 125a0ad602
11 changed files with 135 additions and 10 deletions

View File

@ -12,7 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
#
prepare_network_config(hiera('network_scheme', {}))
$mgmt_address = get_network_role_property('management', 'ipaddr')
$elasticsearch_kibana = hiera('elasticsearch_kibana')
$network_metadata = hiera('network_metadata')
$es_nodes = get_nodes_hash_by_roles($network_metadata, ['elasticsearch_kibana'])
$es_address_map = get_node_to_ipaddr_map_by_network_role($es_nodes, 'management')
$es_nodes_ips = values($es_address_map)
include lma_logging_analytics::params
# Params related to Elasticsearch.
$es_dir = $elasticsearch_kibana['data_dir']
@ -47,16 +55,25 @@ class { 'elasticsearch':
# Start an instance of elasticsearch
elasticsearch::instance { $es_instance:
config => {
'threadpool.bulk.queue_size' => '1000',
'bootstrap.mlockall' => true,
'http.cors.allow-origin' => '/.*/',
'http.cors.enabled' => true
},
'threadpool.bulk.queue_size' => '1000',
'bootstrap.mlockall' => true,
'http.cors.allow-origin' => '/.*/',
'http.cors.enabled' => true,
'cluster.name' => $lma_logging_analytics::params::es_cluster_name,
'node.name' => "${::fqdn}_${es_instance}",
'node.master' => true,
'node.data' => true,
'discovery.zen.ping.multicast' => {'enabled' => false},
'discovery.zen.ping.unicast.hosts' => $es_nodes_ips,
'http.bind_host' => $mgmt_address,
'transport.bind_host' => $mgmt_address,
}
}
lma_logging_analytics::es_template { ['log', 'notification']:
number_of_replicas => 0 + $elasticsearch_kibana['number_of_replicas'],
require => Elasticsearch::Instance[$es_instance],
host => $mgmt_address,
}
class { 'lma_logging_analytics::curator':

View File

@ -38,12 +38,18 @@ firewall {'020 ssh':
action => 'accept',
}
firewall { '100 elasticsearch':
firewall { '100 elasticsearch REST':
port => 9200,
proto => 'tcp',
action => 'accept',
}
firewall { '110 elasticsearch clustering':
port => 9300,
proto => 'tcp',
action => 'accept',
}
firewall { '101 kibana':
port => 80,
proto => 'tcp',

View File

@ -0,0 +1,44 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Deploy a Hiera override file that pins the list of Corosync roles to this
# plugin's own node role, so Pacemaker/Corosync only clusters the
# elasticsearch_kibana nodes instead of the default roles.
$hiera_dir = '/etc/hiera/override'
$plugin_name = 'elasticsearch_kibana'
$plugin_yaml = "${plugin_name}.yaml"
$corosync_roles = [$plugin_name]
# Render the YAML snippet "corosync_roles:" followed by one " - <role>" entry
# per element of $corosync_roles, via an embedded ERB template.
$calculated_content = inline_template('
corosync_roles:
<%
@corosync_roles.each do |crole|
%> - <%= crole %>
<% end -%>
')
# Create the override directory first, then the override YAML file
# (ordering enforced by the -> chaining arrow).
file {$hiera_dir:
ensure => directory,
} ->
file { "${hiera_dir}/${plugin_yaml}":
ensure => file,
content => "${calculated_content}\n",
}
# NOTE(review): presumably needed so Hiera can deep-merge the override data
# with the base hierarchy — confirm hiera.yaml uses :merge_behavior: deeper.
package {'ruby-deep-merge':
ensure => 'installed',
}
# Register the override file in the Hiera hierarchy, inserted right after the
# per-module override entry so it takes effect for subsequent lookups.
file_line {"${plugin_name}_hiera_override":
path => '/etc/hiera.yaml',
line => " - override/${plugin_name}",
after => ' - "override/module/%{calling_module}"',
}

View File

@ -12,8 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
#
prepare_network_config(hiera('network_scheme', {}))
$mgmt_address = get_network_role_property('management', 'ipaddr')
$elasticsearch_kibana = hiera('elasticsearch_kibana')
class { 'lma_logging_analytics::kibana':
number_of_replicas => 0 + $elasticsearch_kibana['number_of_replicas']
number_of_replicas => 0 + $elasticsearch_kibana['number_of_replicas'],
es_host => $mgmt_address,
}

View File

@ -17,10 +17,12 @@
# Defined type: install an Elasticsearch index template named after the
# resource title (e.g. 'log' or 'notification'); the title is also used as
# the index prefix inside the rendered JSON template.
#
# Parameters:
#  [*number_of_shards*]   - number of primary shards for matching indices.
#  [*number_of_replicas*] - number of replica shards for matching indices.
#  [*host*]               - Elasticsearch host the template is pushed to.
define lma_logging_analytics::es_template (
$number_of_shards = 3,
$number_of_replicas = 0,
$host = 'localhost',
) {
# Consumed by es_template.json.erb to scope the template to "<prefix>-*".
# NOTE(review): assumed from the naming — confirm against the .erb file.
$index_prefix = $title
elasticsearch::template { $title:
content => template('lma_logging_analytics/es_template.json.erb'),
host => $host,
}
}

View File

@ -17,6 +17,7 @@
class lma_logging_analytics::kibana (
$number_of_replicas = $lma_logging_analytics::params::kibana_replicas,
$es_host = 'localhost',
) inherits lma_logging_analytics::params {
validate_integer($number_of_replicas)
@ -45,7 +46,8 @@ class lma_logging_analytics::kibana (
}
elasticsearch::template { 'kibana':
content => "{\"template\":\"kibana-*\", \"settings\": {\"number_of_replicas\":${number_of_replicas}}}"
content => "{\"template\":\"kibana-*\", \"settings\": {\"number_of_replicas\":${number_of_replicas}}}",
host => $es_host,
}
# Note that the dashboards are stored in templates/ because it is the only way
@ -55,6 +57,7 @@ class lma_logging_analytics::kibana (
# for details
lma_logging_analytics::kibana_dashboard { 'logs':
content => template('lma_logging_analytics/kibana_dashboards/logs.json'),
host => $es_host,
require => [File["${dashboard_dir}/logs.json"], Elasticsearch::Template['kibana']],
}
@ -65,6 +68,7 @@ class lma_logging_analytics::kibana (
lma_logging_analytics::kibana_dashboard { 'notifications':
content => template('lma_logging_analytics/kibana_dashboards/notifications.json'),
host => $es_host,
require => [File["${dashboard_dir}/notifications.json"], Elasticsearch::Template['kibana']],
}

View File

@ -13,11 +13,13 @@
# under the License.
#
define lma_logging_analytics::kibana_dashboard (
$es_url = 'http://localhost:9200',
$host = 'localhost',
$port = '9200',
$content = undef,
) {
include lma_logging_analytics::params
$es_url = "http://${host}:${port}"
$dashboard_title = join([$lma_logging_analytics::params::kibana_dashboard_prefix, capitalize($title)], '')
$dashboard_id = uriescape($dashboard_title)

View File

@ -15,6 +15,7 @@
# Class lma_logging_analytics::params
class lma_logging_analytics::params {
$es_cluster_name = 'lma'
$retention_period = 30
$indexes_prefixes = []

View File

@ -2,14 +2,41 @@
type: group
role: [elasticsearch_kibana]
tasks:
- fuel_pkgs
- hiera
- globals
- tools
- logging
- netconfig
- hosts
- firewall
- deploy_start
- cluster
- cluster-haproxy
- openstack-haproxy-stats
required_for: [deploy_end]
requires: [deploy_start]
parameters:
strategy:
type: parallel
type: one_by_one
- id: es-virtual-ip
type: puppet
groups: [elasticsearch_kibana]
required_for: [deploy_end]
requires: [cluster]
parameters:
puppet_manifest: "puppet/modules/osnailyfacter/modular/virtual_ips/virtual_ips.pp"
puppet_modules: "puppet/modules"
timeout: 3600
- id: es-hiera-override
type: puppet
groups: [elasticsearch_kibana]
requires: [globals]
required_for: [logging]
parameters:
puppet_manifest: "puppet/manifests/hiera_override.pp"
puppet_modules: "puppet/modules"
timeout: 120

11
network_roles.yaml Normal file
View File

@ -0,0 +1,11 @@
# Network role for the Elasticsearch/Kibana plugin: maps the "elasticsearch"
# traffic onto the management network and declares the cluster virtual IP
# (es_vip_mgmt) held by the elasticsearch_kibana nodes.
- id: "elasticsearch"
default_mapping: "management"
properties:
subnet: true
gateway: false
vip:
# VIP managed in the haproxy namespace; "elasticsearch" is its alias name.
- name: "es_vip_mgmt"
namespace: "haproxy"
alias: "elasticsearch"
node_roles:
- "elasticsearch_kibana"

View File

@ -8,6 +8,8 @@ CONCAT_TARBALL_URL="https://forgeapi.puppetlabs.com/v3/files/puppetlabs-concat-1
STDLIB_TARBALL_URL="https://forgeapi.puppetlabs.com/v3/files/puppetlabs-stdlib-4.7.0.tar.gz"
NGINX_TARBALL_URL="https://forgeapi.puppetlabs.com/v3/files/jfryman-nginx-0.2.2.tar.gz"
ELASTICSEARCH_TARBALL_URL="https://forgeapi.puppetlabs.com/v3/files/elasticsearch-elasticsearch-0.9.1.tar.gz"
FUEL_LIB_COMMIT="7.0"
FUEL_LIB_TARBALL_URL="https://github.com/openstack/fuel-library/archive/${FUEL_LIB_COMMIT}.tar.gz"
# Kibana 3 sources
KIBANA_TARBALL_URL="https://download.elasticsearch.org/kibana/kibana/kibana-3.1.2.tar.gz"
@ -39,3 +41,9 @@ download_puppet_module "elasticsearch" "$ELASTICSEARCH_TARBALL_URL"
KIBANA_FOLDER="${MODULES_DIR}/lma_logging_analytics/files/kibana/src"
mkdir -p "${KIBANA_FOLDER}"
wget -qO- "${KIBANA_TARBALL_URL}" | tar -C "${KIBANA_FOLDER}" --strip-components=1 -xz
# Extract dependent manifests from fuel-library
rm -rf "${MODULES_DIR:?}"/{l23network,osnailyfacter,cluster,pacemaker_wrappers,pacemaker,openstack}
wget -qO- "${FUEL_LIB_TARBALL_URL}" | \
tar -C "${MODULES_DIR}" --strip-components=3 -zxvf - \
fuel-library-${FUEL_LIB_COMMIT}/deployment/puppet/{l23network,osnailyfacter,cluster,pacemaker_wrappers,pacemaker,openstack}