Re-enable galera http check (for haproxy)
This commit is contained in:
@@ -47,7 +47,9 @@ class cloud::database::sql (
|
||||
$neutron_db_password = $os_params::neutron_db_password,
|
||||
$neutron_db_allowed_hosts = $os_params::neutron_db_allowed_hosts,
|
||||
$mysql_password = $os_params::mysql_password,
|
||||
$mysql_sys_maint = $os_params::mysql_sys_maint
|
||||
$mysql_sys_maint = $os_params::mysql_sys_maint,
|
||||
$cluster_check_dbuser = $os_params::cluster_check_dbuser,
|
||||
$cluster_check_dbpassword = $os_params::cluster_check_dbpassword
|
||||
) {
|
||||
|
||||
include 'xinetd'
|
||||
@@ -144,6 +146,7 @@ class cloud::database::sql (
|
||||
allowed_hosts => $heat_db_allowed_hosts,
|
||||
}
|
||||
|
||||
|
||||
# Monitoring DB
|
||||
warning('Database mapping must be updated to puppetlabs/puppetlabs-mysql >= 2.x (see: https://dev.ring.enovance.com/redmine/issues/4510)')
|
||||
|
||||
@@ -152,14 +155,14 @@ class cloud::database::sql (
|
||||
charset => 'utf8',
|
||||
require => File['/root/.my.cnf']
|
||||
}
|
||||
database_user { 'clustercheckuser@localhost':
|
||||
database_user { "${cluster_check_dbuser}@localhost":
|
||||
ensure => 'present',
|
||||
# can not change password in clustercheck script
|
||||
password_hash => mysql_password('clustercheckpassword!'),
|
||||
password_hash => mysql_password($cluster_check_dbpassword),
|
||||
provider => 'mysql',
|
||||
require => File['/root/.my.cnf']
|
||||
}
|
||||
database_grant { 'clustercheckuser@localhost/monitoring':
|
||||
database_grant { "${cluster_check_dbuser}@localhost/monitoring":
|
||||
privileges => ['all']
|
||||
}
|
||||
|
||||
@@ -171,7 +174,32 @@ class cloud::database::sql (
|
||||
}
|
||||
|
||||
Database_user<<| |>>
|
||||
}
|
||||
|
||||
# Haproxy http monitoring
|
||||
file_line { 'mysqlchk-in-etc-services':
|
||||
path => '/etc/services',
|
||||
line => 'mysqlchk 9200/tcp',
|
||||
match => '^mysqlchk 9200/tcp$',
|
||||
notify => Service['xinetd'];
|
||||
}
|
||||
|
||||
file {
|
||||
'/etc/xinetd.d/mysqlchk':
|
||||
content => template('cloud/database/mysqlchk.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
require => File['/usr/bin/clustercheck'],
|
||||
notify => Service['xinetd'];
|
||||
'/usr/bin/clustercheck':
|
||||
ensure => present,
|
||||
content => template('cloud/database/clustercheck.erb'),
|
||||
mode => '0755',
|
||||
owner => 'root',
|
||||
group => 'root';
|
||||
}
|
||||
|
||||
} # if $::hostname == $galera_master
|
||||
|
||||
exec{'clean-mysql-binlog':
|
||||
# first sync take a long time
|
||||
|
||||
@@ -27,7 +27,8 @@ describe 'cloud::database::sql' do
|
||||
end
|
||||
|
||||
let :params do
|
||||
{ :service_provider => 'sysv',
|
||||
{
|
||||
:service_provider => 'sysv',
|
||||
:api_eth => '10.0.0.1',
|
||||
:galera_master => '10.0.0.1',
|
||||
:galera_nextserver => ['10.0.0.1','10.0.0.2','10.0.0.3'],
|
||||
@@ -56,7 +57,10 @@ describe 'cloud::database::sql' do
|
||||
:neutron_db_user => 'neutron',
|
||||
:neutron_db_password => 'secrete',
|
||||
:neutron_db_allowed_hosts => ['10.0.0.1','10.0.0.2','10.0.0.3'],
|
||||
:mysql_sys_maint => 'sys' }
|
||||
:mysql_sys_maint => 'sys',
|
||||
:cluster_check_dbuser => 'clustercheckuser',
|
||||
:cluster_check_dbpassword => 'clustercheckpassword!'
|
||||
}
|
||||
end
|
||||
|
||||
it 'configure mysql galera server' do
|
||||
|
||||
38
templates/database/clustercheck.erb
Normal file
38
templates/database/clustercheck.erb
Normal file
@@ -0,0 +1,38 @@
|
||||
#!/bin/bash
# Managed by puppet
# Module cloud
#
# Script to make a proxy (ie HAProxy) capable of monitoring Percona XtraDB Cluster nodes properly
#
# Author: Olaf van Zandwijk olaf.vanzandwijk@nedap.com
# Documentation and download: https://github.com/olafz/percona-clustercheck
#
# Based on the original script from Unai Rodriguez
#

MYSQL_USERNAME="<%= @cluster_check_dbuser %>"
MYSQL_PASSWORD="<%= @cluster_check_dbpassword %>"
ERR_FILE="/dev/null"
# When 1, a node in Donor/Desynced state (wsrep_local_state == 2) is still
# reported as available. Default 0: only fully Synced nodes pass the check.
AVAILABLE_WHEN_DONOR=0

#
# Perform the query to check the wsrep_local_state
#
# NOTE: the password is handed to the client via the MYSQL_PWD environment
# variable instead of --password=... so it is not visible to other local
# users in the process list (`ps`) while the mysql client runs.
WSREP_STATUS=$(MYSQL_PWD="${MYSQL_PASSWORD}" mysql --user="${MYSQL_USERNAME}" -e "SHOW STATUS LIKE 'wsrep_local_state';" 2>${ERR_FILE} | awk '{if (NR!=1){print $2}}' 2>${ERR_FILE})

if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]]
then
    # Percona XtraDB Cluster node local state is 'Synced' == return HTTP 200
    # (haproxy's "option httpchk" keeps the node in the pool)
    /bin/echo -en "HTTP/1.1 200 OK\r\n"
    /bin/echo -en "Content-Type: text/plain\r\n"
    /bin/echo -en "\r\n"
    /bin/echo -en "Mariadb Cluster Node is synced.\r\n"
    /bin/echo -en "\r\n"
else
    # Percona XtraDB Cluster node local state is not 'Synced' == return HTTP 503
    # (also reached when mysqld is unreachable and WSREP_STATUS is empty,
    # so haproxy takes the node out of the pool)
    /bin/echo -en "HTTP/1.1 503 Service Unavailable\r\n"
    /bin/echo -en "Content-Type: text/plain\r\n"
    /bin/echo -en "\r\n"
    /bin/echo -en "Mariadb Cluster Node is not synced.\r\n"
    /bin/echo -en "\r\n"
fi
|
||||
21
templates/database/mysqlchk.erb
Normal file
21
templates/database/mysqlchk.erb
Normal file
@@ -0,0 +1,21 @@
|
||||
# Managed by puppet
# Module cloud
#
# default: on
# description: mysqlchk
#
# xinetd service definition: accepts TCP connections on port 9200 and
# hands each one to /usr/bin/clustercheck, whose stdout (a minimal
# HTTP 200/503 response) is sent back to the client. This is what
# haproxy's "option httpchk" probes to decide galera node health.
service mysqlchk
{
# this is a config for xinetd, place it in /etc/xinetd.d/
disable = no
flags = REUSE
socket_type = stream
# must agree with the "mysqlchk 9200/tcp" entry added to /etc/services
port = 9200
# wait = no: xinetd spawns one clustercheck per connection (multi-threaded mode)
wait = no
# run the check unprivileged
user = nobody
server = /usr/bin/clustercheck
log_on_failure += USERID
only_from = 0.0.0.0/0
# recommended to put the IPs that need
# to connect exclusively (security purposes)
# per_source = UNLIMITED: no cap on connections from a single source IP
per_source = UNLIMITED
}
|
||||
Reference in New Issue
Block a user