Update for ha clustering

James Page 2013-01-09 09:55:00 +00:00
parent bb62f23ea8
commit fd249102af
6 changed files with 246 additions and 9 deletions

View File

@@ -104,3 +104,27 @@ options:
default: None
type: string
description: Comma separated list of key=value config flags to be set in nova.conf.
# HA configuration settings
vip:
type: string
description: "Virtual IP to use to front API services in ha configuration"
vip_iface:
type: string
default: eth0
description: "Network Interface where to place the Virtual IP"
vip_cidr:
type: int
default: 24
description: "Netmask that will be used for the Virtual IP"
ha-bindiface:
type: string
default: eth0
description: |
Default network interface on which the HA cluster will bind for communication
with the other members of the HA cluster.
ha-mcastport:
type: int
default: 5408
description: |
Default multicast port number that will be used to communicate between
HA Cluster nodes.
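
A minimal sketch of how these HA options might be set on a deployed service (the service name and values below are assumptions for illustration, not part of this change):

    juju set nova-cloud-controller vip=192.168.21.100 vip_iface=eth0 vip_cidr=24 \
        ha-bindiface=eth0 ha-mcastport=5408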

View File

@@ -200,3 +200,106 @@ except socket.error:
pass
"
}
HAPROXY_CFG=/etc/haproxy/haproxy.cfg
HAPROXY_DEFAULT=/etc/default/haproxy
##########################################################################
# Description: Configures HAProxy services for OpenStack APIs
# Parameters:
# Space-delimited list of service:port combinations for which
# haproxy service configuration should be generated. The function
# assumes the name of the peer relation is 'cluster' and that every
# service unit in the peer relation is running the same services.
#
# The HAProxy service will listen on port + 10000.
# Example:
# configure_haproxy cinder_api:12345 nova_api:9999
##########################################################################
configure_haproxy() {
local address=`unit-get private-address`
local name=${JUJU_UNIT_NAME////-}
cat > $HAPROXY_CFG << EOF
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
user haproxy
group haproxy
spread-checks 0
defaults
log global
mode http
option httplog
option dontlognull
retries 3
timeout queue 1000
timeout connect 1000
timeout client 1000
timeout server 1000
listen stats :8888
mode http
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password
EOF
for service in $@; do
local service_name=$(echo $service | cut -d : -f 1)
local api_listen_port=$(echo $service | cut -d : -f 2)
local haproxy_listen_port=$(($api_listen_port + 10000))
cat >> $HAPROXY_CFG << EOF
listen $service_name 0.0.0.0:$haproxy_listen_port
balance roundrobin
option tcplog
server $name $address:$api_listen_port check
EOF
for r_id in `relation-ids cluster`; do
for unit in `relation-list -r $r_id`; do
local unit_name=${unit////-}
local unit_address=`relation-get -r $r_id private-address $unit`
if [ -n "$unit_address" ]; then
echo " server $unit_name $unit_address:$api_listen_port check" \
>> $HAPROXY_CFG
fi
done
done
done
echo "ENABLED=1" > $HAPROXY_DEFAULT
}
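# For illustration only (not part of the original change): with one peer unit,
# 'configure_haproxy nova_api:8774' would append a stanza along the lines of:
#
#   listen nova_api 0.0.0.0:18774
#     balance roundrobin
#     option tcplog
#     server nova-cloud-controller-0 10.0.0.1:8774 check
#     server nova-cloud-controller-1 10.0.0.2:8774 check
#
# i.e. haproxy fronts each API on its native port + 10000, with one backend
# entry per unit in the 'cluster' peer relation. Unit names and addresses
# above are hypothetical.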
##########################################################################
# Description: Query HA interface to determine if the cluster is configured
# Returns: 0 if configured, 1 if not configured
##########################################################################
is_clustered() {
for r_id in `relation-ids ha`; do
for unit in `relation-list -r $r_id`; do
clustered=`relation-get -r $r_id clustered $unit`
if [ -n "$clustered" ]; then
return 0
fi
done
done
return 1
}
##########################################################################
# Description: Determines whether host is owner of clustered services
# Parameters: Name of CRM resource to check ownership of
# Returns: 0 if leader, 1 if not leader
##########################################################################
is_leader() {
hostname=`hostname`
if [ -x /usr/sbin/crm ]; then
if crm resource show $1 | grep -q $hostname; then
return 0
fi
fi
return 1
}
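# Typical usage in the hooks (sketch): only the unit that currently owns the
# VIP resource acts on behalf of the cluster, e.g.
#
#   if is_clustered && is_leader 'res_nova_vip'; then
#       # advertise the VIP-fronted endpoints
#   fi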

View File

@@ -13,7 +13,7 @@ if [[ -n "$n_vol" ]] ; then
dpkg -l | grep -q nova-api-os-volume || apt-get -y install nova-api-os-volume
fi
PACKAGES="$SERVICES python-mysqldb python-keystone uuid"
PACKAGES="$SERVICES python-mysqldb python-keystone uuid haproxy"
NET_MANAGER=$(config-get network-manager)
if [ "$NET_MANAGER" == "Quantum" ]; then

View File

@@ -64,7 +64,9 @@ function config_changed {
set_config_flags
if [ "$NET_MANAGER" == "Quantum" ]; then
if [ "$NET_MANAGER" == "Quantum" ] && \
is_clustered && is_leader 'res_nova_vip' || \
! is_clustered; then
configure_quantum_networking
fi
@@ -169,9 +171,27 @@ function keystone_joined {
# we need to get two entries into keystone's catalog, nova + ec2
# group, them by prepending $service_ to each setting. the keystone
# charm will assemble settings into corresponding catalog entries
nova_url="http://$(unit-get private-address):8774/v1.1/\$(tenant_id)s"
ec2_url="http://$(unit-get private-address):8773/services/Cloud"
s3_url="http://$(unit-get private-address):3333"
if is_clustered && is_leader 'res_nova_vip'; then
address=$(config-get vip)
nova_port=18774
ec2_port=18773
s3_port=13333
quantum_port=19696
vol_port=18776
elif ! is_clustered; then
address=$(unit-get private-address)
nova_port=8774
ec2_port=8773
s3_port=3333
quantum_port=9696
vol_port=8776
else
# Not the leader and clustered - no action required
return 0
fi
nova_url="http://$address:$nova_port/v1.1/\$(tenant_id)s"
ec2_url="http://$address:$ec2_port/services/Cloud"
s3_url="http://$address:$s3_port"
# these are the default endpoints
relation-set nova_service="nova" \
@@ -191,7 +211,7 @@ function keystone_joined {
s3_internal_url="$s3_url"
if [ "$(config-get network-manager)" == "Quantum" ]; then
quantum_url="http://$(unit-get private-address):9696"
quantum_url="http://$address:$quantum_port"
relation-set quantum_service="quantum" \
quantum_region="RegionOne" \
quantum_public_url="$quantum_url" \
@@ -201,7 +221,7 @@ function keystone_joined {
# tack on an endpoint for nova-volume a relation exists.
if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
nova_vol_url="http://$(unit-get private-address):8776/v1/\$(tenant_id)s"
nova_vol_url="http://$address:$vol_port/v1/\$(tenant_id)s"
relation-set nova-volume_service="nova-volume" \
nova-volume_region="RegionOne" \
nova-volume_public_url="$nova_vol_url" \
@@ -365,7 +385,7 @@ compute_joined() {
fi
}
quantum_joined() {
function quantum_joined() {
# Tell quantum service about keystone
rids=$(relation-ids identity-service)
for rid in $rids; do
@@ -386,6 +406,85 @@ quantum_joined() {
relation-set quantum_plugin=$(config-get quantum-plugin)
}
function cluster_changed() {
configure_haproxy "quantum_api:9696" "nova_api:8774" \
"ec2_api:8773" "s3_api:3333" \
"volume_api:8776"
}
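# Note: the ports passed above are the native API ports; with the +10000
# offset applied by configure_haproxy, haproxy listens on 19696, 18774,
# 18773, 13333 and 18776 respectively, which are the same ports advertised
# to keystone when clustered (see keystone_joined and ha_relation_changed).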
function ha_relation_joined() {
local corosync_bindiface=`config-get ha-bindiface`
local corosync_mcastport=`config-get ha-mcastport`
local vip=`config-get vip`
local vip_iface=`config-get vip_iface`
local vip_cidr=`config-get vip_cidr`
if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
[ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
[ -n "$corosync_mcastport" ]; then
# TODO: This feels horrible but the data required by the hacluster
# charm is quite complex and is python ast parsed.
resources="{
'res_nova_vip':'ocf:heartbeat:IPaddr2',
'res_nova_haproxy':'lsb:haproxy'
}"
resource_params="{
'res_nova_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
'res_nova_haproxy': 'op monitor interval=\"5s\"'
}"
init_services="{
'res_nova_haproxy':'haproxy'
}"
groups="{
'grp_nova_haproxy':'res_nova_vip res_nova_haproxy'
}"
relation-set corosync_bindiface=$corosync_bindiface \
corosync_mcastport=$corosync_mcastport \
resources="$resources" resource_params="$resource_params" \
init_services="$init_services" groups="$groups"
else
juju-log "Insufficient configuration data to configure hacluster"
exit 1
fi
}
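# For illustration only (a sketch, not how the hacluster charm is invoked):
# the resource definitions above are expected to end up as pacemaker
# primitives roughly equivalent to:
#
#   crm configure primitive res_nova_vip ocf:heartbeat:IPaddr2 \
#       params ip="$vip" cidr_netmask="$vip_cidr" nic="$vip_iface"
#   crm configure primitive res_nova_haproxy lsb:haproxy op monitor interval="5s"
#   crm configure group grp_nova_haproxy res_nova_vip res_nova_haproxy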
function ha_relation_changed() {
local clustered=`relation-get clustered`
if [ -n "$clustered" ] && is_leader 'res_nova_vip'; then
for r_id in `relation-ids identity-service`; do
address=$(config-get vip)
nova_url="http://$address:18774/v1.1/\$(tenant_id)s"
ec2_url="http://$address:18773/services/Cloud"
s3_url="http://$address:13333"
relation-set -r $r_id \
nova_public_url="$nova_url" \
nova_admin_url="$nova_url" \
nova_internal_url="$nova_url" \
ec2_public_url="$ec2_url" \
ec2_admin_url="$ec2_url" \
ec2_internal_url="$ec2_url" \
s3_public_url="$s3_url" \
s3_admin_url="$s3_url" \
s3_internal_url="$s3_url"
if [ "$(config-get network-manager)" == "Quantum" ]; then
quantum_url="http://$address:19696"
relation-set -r $r_id \
quantum_public_url="$quantum_url" \
quantum_admin_url="$quantum_url" \
quantum_internal_url="$quantum_url"
fi
if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
nova_vol_url="http://$address:18776/v1/\$(tenant_id)s"
relation-set -r $r_id \
nova-volume_public_url="$nova_vol_url" \
nova-volume_admin_url="$nova_vol_url" \
nova-volume_internal_url="$nova_vol_url"
fi
done
fi
}
arg0=$(basename $0)
case $arg0 in
"start"|"stop") service_ctl all $arg0 ;;
@@ -404,5 +503,9 @@ case $arg0 in
"nova-volume-service-relation-joined") volume_joined ;;
"cloud-compute-relation-joined") compute_joined ;;
"quantum-network-service-relation-joined") quantum_joined ;;
"cluster-relation-changed") cluster_changed ;;
"cluster-relation-departed") cluster_changed ;;
"ha-relation-joined") ha_relation_joined ;;
"ha-relation-changed") ha_relation_changed ;;
*) exit 0 ;;
esac

View File

@@ -24,3 +24,10 @@ requires:
interface: nova-volume
quantum-network-service:
interface: quantum
ha:
interface: hacluster
scope: container
peers:
cluster:
interface: nova-ha
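
A minimal sketch of how the new 'ha' relation could be wired up to an hacluster subordinate (charm and service names are assumptions for illustration):

    juju deploy hacluster nova-hacluster
    juju add-relation nova-cloud-controller nova-hacluster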

View File

@@ -1 +1 @@
166
167