Support for Grizzly. Support for HA.

This commit is contained in:
Adam Gandelman
2013-06-03 10:34:04 -07:00
15 changed files with 821 additions and 56 deletions

View File

@@ -58,3 +58,42 @@ options:
default: 8776 default: 8776
type: int type: int
description: OpenStack Volume API listening port. description: OpenStack Volume API listening port.
region:
default: RegionOne
type: string
description: OpenStack Region
# HA configuration settings
vip:
type: string
description: "Virtual IP to use to front cinder API in ha configuration"
vip_iface:
type: string
default: eth0
description: "Network Interface where to place the Virtual IP"
vip_cidr:
type: int
default: 24
description: "Netmask that will be used for the Virtual IP"
ha-bindiface:
type: string
default: eth0
description: |
Default network interface that the HA cluster will bind to for
communication with the other members of the HA cluster.
ha-mcastport:
type: int
default: 5401
description: |
Default multicast port number that will be used to communicate between
HA Cluster nodes.
# Per-service HTTPS configuration.
ssl_cert:
type: string
description: |
SSL certificate to install and use for API ports. Setting this value
and ssl_key will enable reverse proxying, point Cinder's entry in the
Keystone catalog to use https, and override any certificate and key
issued by Keystone (if it is configured to do so).
ssl_key:
type: string
description: SSL key to use with certificate specified as ssl_cert.

View File

@@ -2,7 +2,7 @@
CHARM="cinder" CHARM="cinder"
COMMON_PACKAGES="cinder-common python-mysqldb gdisk" COMMON_PACKAGES="cinder-common python-mysqldb gdisk haproxy"
API_PACKAGES="cinder-api" API_PACKAGES="cinder-api"
VOL_PACKAGES="cinder-volume" VOL_PACKAGES="cinder-volume"
SCHED_PACKAGES="cinder-scheduler" SCHED_PACKAGES="cinder-scheduler"
@@ -10,10 +10,14 @@ SCHED_PACKAGES="cinder-scheduler"
CINDER_CONF="/etc/cinder/cinder.conf" CINDER_CONF="/etc/cinder/cinder.conf"
API_CONF="/etc/cinder/api-paste.ini" API_CONF="/etc/cinder/api-paste.ini"
if [[ -e $CHARM_DIR/lib/openstack-common ]] ; then CONFIG_CHANGED="False"
. $CHARM_DIR/lib/openstack-common
HOOKS_DIR="$CHARM_DIR/hooks"
if [[ -e $HOOKS_DIR/lib/openstack-common ]] ; then
. $HOOKS_DIR/lib/openstack-common
else else
juju-log "Couldn't load $CHARM_DIR/openstack-common" && exit 1 juju-log "Couldn't load $HOOKS_DIR/openstack-common" && exit 1
fi fi
service_enabled() { service_enabled() {
@@ -77,6 +81,7 @@ function set_or_update {
juju-log "cinder: Setting new option $KEY=$VALUE in $CONF_FILE" juju-log "cinder: Setting new option $KEY=$VALUE in $CONF_FILE"
echo "$out$VALUE" >>$CONF_FILE echo "$out$VALUE" >>$CONF_FILE
fi fi
CONFIG_CHANGED="True"
} }
cinder_ctl() { cinder_ctl() {
@@ -90,16 +95,8 @@ cinder_ctl() {
else else
svcs=$svc svcs=$svc
fi fi
for s in $svcs ; do SERVICES=$svcs
case "$action" in service_ctl all $action
"start")
service_ctl_status $s || service $s start ;;
"stop")
service_ctl_status $s && service $s stop || return 0 ;;
"restart")
service_ctl_status $s && service $s restart || service $s start ;;
esac
done
} }
clean_storage() { clean_storage() {
@@ -228,3 +225,80 @@ prepare_storage() {
error_out "Could not create volume group: $vol_group" error_out "Could not create volume group: $vol_group"
return 0 return 0
} }
configure_https() {
# Enable or disable HTTPS reverse proxying for the cinder API depending on
# whether SSL material is available (see https() in openstack-common).
# Re-plumbs haproxy when clustered/peered, moves the API listen port, and
# re-advertises the keystone endpoint so the catalog URL stays correct.
# request openstack-common setup reverse proxy mapping for API and registry
# servers
service_enabled "api" || return 0
local cfg_api_port=$(config-get api-listening-port)
service_ctl cinder-api stop
if [[ -n "$(peer_units)" ]] || is_clustered ; then
# haproxy may already be configured. need to push it back in the request
# pipeline in preparation for a change from:
# from: haproxy (8776) -> cinder-api (8766)
# to: ssl (8776) -> haproxy (8766) -> cinder-api (8756)
local next_server=$(determine_haproxy_port $cfg_api_port)
local api_port=$(determine_api_port $cfg_api_port)
configure_haproxy "cinder_api:$next_server:$api_port"
else
# if not clustered, the cinder-api is next in the pipeline.
local api_port=$(determine_api_port $cfg_api_port)
local next_server=$api_port
fi
# setup https to point to either haproxy or directly to api server, depending.
setup_https $cfg_api_port:$next_server
# configure servers to listen on new ports accordingly.
set_or_update osapi_volume_listen_port "$api_port"
service_ctl cinder-api start
local r_id=""
# (re)configure ks endpoint accordingly in ks and nova.
for r_id in $(relation-ids identity-service) ; do
keystone_joined "$r_id"
done
}
do_openstack_upgrade() {
  # Upgrade cinder to a newer OpenStack release and re-fire all relevant
  # relation hooks so configuration is regenerated for the new release.
  # Arguments: $1 - new install source (e.g. 'cloud:precise-grizzly')
  #            $@ - remaining arguments: packages to upgrade
  local rel="$1"
  shift
  local packages="$@"
  local r_id="" unit=""

  configure_install_source "$rel"
  apt-get update
  # force-confnew: accept maintainer configs; the charm rewrites its
  # settings via the relation hooks re-run below.
  DEBIAN_FRONTEND=noninteractive apt-get \
    --option Dpkg::Options::=--force-confnew -y \
    install --no-install-recommends $packages

  # update new configs for all possible relations
  # mysql
  for r_id in $(relation-ids shared-db); do
    for unit in $(relation-list -r $r_id) ; do
      juju-log "$CHARM: Configuring database after upgrade."
      db_changed $r_id $unit
    done
  done

  # rabbitmq-server
  for r_id in $(relation-ids amqp); do
    for unit in $(relation-list -r $r_id) ; do
      juju-log "$CHARM: Configuring amqp after upgrade."
      amqp_changed $r_id $unit
    done
  done

  # keystone
  for r_id in $(relation-ids identity-service); do
    for unit in $(relation-list -r $r_id) ; do
      juju-log "$CHARM: Configuring identity service after upgrade."
      keystone_changed $r_id $unit
    done
  done

  # ceph - ensure librbd gets updated alongside the openstack packages.
  local ceph_ids="$(relation-ids ceph)"
  [[ -n "$ceph_ids" ]] && apt-get -y install ceph-common python-ceph
  for r_id in $ceph_ids ; do
    for unit in $(relation-list -r $r_id) ; do
      juju-log "$CHARM: Configuring ceph client after upgrade."
      ceph_changed $r_id $unit
    done
  done
}

View File

@@ -1,21 +1,21 @@
#!/bin/bash -e #!/bin/bash -e
CHARM_DIR=$(dirname $0) HOOKS_DIR="$CHARM_DIR/hooks"
if [[ -e $CHARM_DIR/cinder-common ]] ; then if [[ -e $HOOKS_DIR/cinder-common ]] ; then
. $CHARM_DIR/cinder-common . $HOOKS_DIR/cinder-common
else else
juju-log "ERROR: Could not source cinder-common from $CHARM_DIR." juju-log "ERROR: Could not source cinder-common from $HOOKS_DIR."
exit 1 exit 1
fi fi
install_hook() { install_hook() {
install_source="$(config-get openstack-origin)" install_source="$(config-get openstack-origin)"
# Check if we are deploying to Precise. If so, we need to use # Check if we are deploying to Precise from distro.
# the Cloud Archive instead of the Ubuntu Archive since Cinder # If so, we need to use the Cloud Archive instead of the
# does not exist there (for precise). # Ubuntu Archive since Cinder does not exist there (for precise).
. /etc/lsb-release . /etc/lsb-release
[[ "$DISTRIB_CODENAME" == "precise" ]] && [[ "$DISTRIB_CODENAME" == "precise" && "$install_source" == "distro" ]] &&
install_source="cloud:precise-folsom" install_source="cloud:precise-folsom"
configure_install_source "$install_source" configure_install_source "$install_source"
@@ -35,6 +35,7 @@ install_hook() {
cinder_ctl cinder-volume restart cinder_ctl cinder-volume restart
fi fi
fi fi
configure_https
} }
db_joined() { db_joined() {
@@ -44,8 +45,12 @@ db_joined() {
} }
db_changed() { db_changed() {
db_host=$(relation-get private-address) local r_id="$1"
db_password=$(relation-get password) local unit_id="$2"
local r_arg=""
[[ -n "$r_id" ]] && r_arg="-r $r_id"
db_host=$(relation-get $r_arg db_host $unit_id)
db_password=$(relation-get $r_arg password $unit_id)
[[ -z "$db_host" ]] || [[ -z "$db_password" ]] && [[ -z "$db_host" ]] || [[ -z "$db_password" ]] &&
juju-log "Missing DB_HOST|DB_PASSWORD, peer not ready? Will retry." && juju-log "Missing DB_HOST|DB_PASSWORD, peer not ready? Will retry." &&
@@ -55,9 +60,10 @@ db_changed() {
cinder_db=$(config-get cinder-db) cinder_db=$(config-get cinder-db)
juju-log "cinder: Configuring cinder for database access to $cinder_db@$db_host" juju-log "cinder: Configuring cinder for database access to $cinder_db@$db_host"
set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$cinder_db" set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$cinder_db"
cinder_ctl all stop if eligible_leader 'res_cinder_vip'; then
/usr/bin/cinder-manage db sync /usr/bin/cinder-manage db sync
cinder_ctl all start fi
cinder_ctl all restart
} }
amqp_joined() { amqp_joined() {
@@ -67,10 +73,23 @@ amqp_joined() {
} }
amqp_changed() { amqp_changed() {
rabbit_host=$(relation-get private-address) local r_id="$1"
rabbit_password=$(relation-get password) local unit_id="$2"
local r_arg=""
[[ -n "$r_id" ]] && r_arg="-r $r_id"
rabbit_host=$(relation-get $r_arg private-address $unit_id)
rabbit_password=$(relation-get $r_arg password $unit_id)
[[ -z "$rabbit_host" ]] || [[ -z "$rabbit_password" ]] && [[ -z "$rabbit_host" ]] || [[ -z "$rabbit_password" ]] &&
juju-log "Missing rabbit_host||rabbit_passwd, peer not ready? Will retry." && exit 0 juju-log "Missing rabbit_host||rabbit_passwd, peer not ready? Will retry." && exit 0
local clustered=$(relation-get $r_arg clustered $unit_id)
if [[ -n "$clustered" ]] ; then
juju-log "$CHARM - amqp_changed: Configuring for access to haclustered "\
"rabbitmq service."
local vip=$(relation-get $r_arg vip $unit_id)
[[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered bu no vip."\
&& exit 0
rabbit_host="$vip"
fi
juju-log "cinder: Configuring cinder for amqp access to $rabbit_host:$rabbit_vhost" juju-log "cinder: Configuring cinder for amqp access to $rabbit_host:$rabbit_vhost"
rabbit_user=$(config-get rabbit-user) rabbit_user=$(config-get rabbit-user)
rabbit_vhost=$(config-get rabbit-vhost) rabbit_vhost=$(config-get rabbit-vhost)
@@ -82,30 +101,44 @@ amqp_changed() {
} }
keystone_joined() { keystone_joined() {
port=$(config-get api-listening-port) # Exit hook execution if unit is not leader of cluster/service
url="http://$(unit-get private-address):$port/v1/\$(tenant_id)s" eligible_leader 'res_cinder_vip' || return 0
relation-set service="cinder" \
region="RegionOne" public_url="$url" admin_url="$url" internal_url="$url" # determine correct endpoint URL
https && scheme="https" || scheme="http"
is_clustered && local host=$(config-get vip) ||
local host=$(unit-get private-address)
local url="$scheme://$host:$(config-get api-listening-port)/v1/\$(tenant_id)s"
r_id=""
if [[ -n "$1" ]] ; then
r_id="-r $1"
fi
relation-set $r_id service="cinder" \
region="$(config-get region)" public_url="$url" admin_url="$url" internal_url="$url"
} }
keystone_changed() { keystone_changed() {
service_port=$(relation-get service_port) local r_id="$1"
auth_port=$(relation-get auth_port) local unit_id="$2"
service_username=$(relation-get service_username) local r_arg=""
service_password=$(relation-get service_password) [[ -n "$r_id" ]] && r_arg="-r $r_id"
service_tenant=$(relation-get service_tenant) service_port=$(relation-get $r_arg service_port $unit_id)
auth_port=$(relation-get $r_arg auth_port $unit_id)
service_username=$(relation-get $r_arg service_username $unit_id)
service_password=$(relation-get $r_arg service_password $unit_id)
service_tenant=$(relation-get $r_arg service_tenant $unit_id)
service_host=$(relation-get $r_arg service_host $unit_id)
auth_host=$(relation-get $r_arg auth_host $unit_id)
[[ -z "$service_port" ]] || [[ -z "$auth_port" ]] || [[ -z "$service_port" ]] || [[ -z "$auth_port" ]] ||
[[ -z "$service_username" ]] || [[ -z "$service_password" ]] || [[ -z "$service_username" ]] || [[ -z "$service_password" ]] ||
[[ -z "$service_tenant" ]] && juju-log "keystone_changed: Peer not ready" && [[ -z "$service_tenant" ]] && juju-log "keystone_changed: Peer not ready" &&
exit 0 exit 0
keystone_host=$(relation-get private-address)
# update keystone authtoken settings accordingly # update keystone authtoken settings accordingly
set_or_update "service_host" "$keystone_host" "$API_CONF" set_or_update "service_host" "$service_host" "$API_CONF"
set_or_update "service_port" "$service_port" "$API_CONF" set_or_update "service_port" "$service_port" "$API_CONF"
set_or_update "auth_host" "$keystone_host" "$API_CONF" set_or_update "auth_host" "$auth_host" "$API_CONF"
set_or_update "auth_port" "$auth_port" "$API_CONF" set_or_update "auth_port" "$auth_port" "$API_CONF"
set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF" set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
set_or_update "admin_user" "$service_username" "$API_CONF" set_or_update "admin_user" "$service_username" "$API_CONF"
@@ -114,6 +147,7 @@ keystone_changed() {
set_or_update "auth_strategy" "keystone" "$CINDER_CONF" set_or_update "auth_strategy" "keystone" "$CINDER_CONF"
cinder_ctl all restart cinder_ctl all restart
configure_https
} }
function ceph_joined { function ceph_joined {
@@ -122,9 +156,13 @@ function ceph_joined {
} }
function ceph_changed { function ceph_changed {
local r_id="$1"
local unit_id="$2"
local r_arg=""
[[ -n "$r_id" ]] && r_arg="-r $r_id"
SERVICE_NAME=`echo $JUJU_UNIT_NAME | cut -d / -f 1` SERVICE_NAME=`echo $JUJU_UNIT_NAME | cut -d / -f 1`
KEYRING=/etc/ceph/ceph.client.$SERVICE_NAME.keyring KEYRING=/etc/ceph/ceph.client.$SERVICE_NAME.keyring
KEY=`relation-get key` KEY=`relation-get $r_arg key $unit_id`
if [ -n "$KEY" ]; then if [ -n "$KEY" ]; then
# But only once # But only once
if [ ! -f $KEYRING ]; then if [ ! -f $KEYRING ]; then
@@ -138,36 +176,146 @@ function ceph_changed {
exit 0 exit 0
fi fi
MONS=`relation-list` MONS=`relation-list $r_arg`
mon_hosts="" mon_hosts=""
for mon in $MONS; do for mon in $MONS; do
mon_hosts="$mon_hosts $(get_ip $(relation-get private-address $mon)):6789" mon_hosts="$mon_hosts $(get_ip $(relation-get $r_arg private-address $mon)):6789"
done done
cat > /etc/ceph/ceph.conf << EOF cat > /etc/ceph/ceph.conf << EOF
[global] [global]
auth supported = $(relation-get auth) auth supported = $(relation-get $r_id auth $unit_id)
keyring = /etc/ceph/\$cluster.\$name.keyring keyring = /etc/ceph/\$cluster.\$name.keyring
mon host = $mon_hosts mon host = $mon_hosts
EOF EOF
# XXX: Horrid kludge to make cinder-volume use # XXX: Horrid kludge to make cinder-volume use
# a different ceph username than admin # a different ceph username than admin
echo "CEPH_ARGS=--id $SERVICE_NAME" >> /etc/environment if [ -z "`grep CEPH_ARGS /etc/environment`" ]; then
# Only insert environment var if we don't already have it
echo "CEPH_ARGS=\"--id $SERVICE_NAME\"" >> /etc/environment
fi
# Also add it to the overrides for cinder volume # Also add it to the overrides for cinder volume
# in preparation for move to start-stop-daemon. # in preparation for move to start-stop-daemon.
echo "env CEPH_ARGS=\"--id $SERVICE_NAME\"" > /etc/init/cinder-volume.override echo "env CEPH_ARGS=\"--id $SERVICE_NAME\"" > /etc/init/cinder-volume.override
# Create the cinder pool if it does not already exist # Only the leader should try to create pools
if ! rados --id $SERVICE_NAME lspools | grep -q cinder; then if eligible_leader 'res_cinder_vip'; then
rados --id $SERVICE_NAME mkpool cinder # Create the cinder pool if it does not already exist
if ! rados --id $SERVICE_NAME lspools | grep -q cinder; then
rados --id $SERVICE_NAME mkpool cinder
fi
fi fi
# Reconfigure cinder-volume # Reconfigure cinder-volume
set_or_update volume_driver cinder.volume.driver.RBDDriver set_or_update volume_driver cinder.volume.driver.RBDDriver
set_or_update rbd_pool cinder set_or_update rbd_pool cinder
# Set host to service name to ensure that requests get
# distributed across all cinder servers in a cluster
# as they can all service ceph requests.
set_or_update host "$SERVICE_NAME"
cinder_ctl "cinder-volume" restart cinder_ctl "cinder-volume" restart
} }
function cluster_changed() {
# Re-render haproxy configuration whenever 'cluster' peers come or go.
# API traffic lands on haproxy, which balances across all peer backends;
# the local cinder-api moves to a bumped-down port behind it.
service_enabled "api" || return 0
[[ -z "$(peer_units)" ]] &&
juju-log "cluster_changed() with no peers." && exit 0
local cfg_api_port="$(config-get api-listening-port)"
# haproxy fronts the configured port; the API backend listens on a port
# derived from it (see determine_api_port / determine_haproxy_port).
local haproxy_port="$(determine_haproxy_port $cfg_api_port)"
local backend_port="$(determine_api_port $cfg_api_port)"
service cinder-api stop || :
configure_haproxy "cinder_api:$haproxy_port:$backend_port"
set_or_update osapi_volume_listen_port "$backend_port"
service cinder-api start
}
function upgrade_charm() {
# On charm upgrade, re-render haproxy/listen-port configuration in case
# templates or peer membership changed while the old charm was in place.
cluster_changed
}
function ha_relation_joined() {
  # Publish HA configuration (corosync transport settings plus pacemaker
  # resource definitions) to the hacluster subordinate charm. All five
  # config options are mandatory; bail out loudly if any is missing.
  local corosync_bindiface=$(config-get ha-bindiface)
  local corosync_mcastport=$(config-get ha-mcastport)
  local vip=$(config-get vip)
  local vip_iface=$(config-get vip_iface)
  local vip_cidr=$(config-get vip_cidr)

  if [ -z "$vip" ] || [ -z "$vip_iface" ] || [ -z "$vip_cidr" ] || \
     [ -z "$corosync_bindiface" ] || [ -z "$corosync_mcastport" ]; then
    juju-log "Insufficient configuration data to configure hacluster"
    exit 1
  fi

  # TODO: This feels horrible but the data required by the hacluster
  # charm is quite complex and is python ast parsed.
  local resources="{
'res_cinder_vip':'ocf:heartbeat:IPaddr2',
'res_cinder_haproxy':'lsb:haproxy'
}"
  local resource_params="{
'res_cinder_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
'res_cinder_haproxy': 'op monitor interval=\"5s\"'
}"
  local init_services="{
'res_cinder_haproxy':'haproxy'
}"
  local clones="{
'cl_cinder_haproxy': 'res_cinder_haproxy'
}"
  relation-set corosync_bindiface=$corosync_bindiface \
    corosync_mcastport=$corosync_mcastport \
    resources="$resources" resource_params="$resource_params" \
    init_services="$init_services" clones="$clones"
}
function ha_relation_changed() {
# Once hacluster reports the cluster is up (the 'clustered' flag is set),
# the CRM resource owner re-points the keystone endpoint at the VIP.
local clustered=`relation-get clustered`
if [ -n "$clustered" ] && is_leader 'res_cinder_vip'; then
juju-log "Cluster leader, reconfiguring keystone endpoint"
# use https scheme only when SSL material is available (see https())
https && local scheme="https" || local scheme="http"
local url="$scheme://$(config-get vip):$(config-get api-listening-port)/v1/\$(tenant_id)s"
local r_id=""
for r_id in `relation-ids identity-service`; do
relation-set -r $r_id service="cinder" \
region="$(config-get region)" \
public_url="$url" admin_url="$url" internal_url="$url"
done
fi
}
function config_changed() {
# Handle config-changed: perform an OpenStack release upgrade when
# openstack-origin now points at a newer release, refresh HTTPS
# plumbing, and persist env vars for out-of-hook health-check scripts.
# possibly upgrade if openstack-origin has been bumped
local install_src=$(config-get openstack-origin)
local cur=$(get_os_codename_package "cinder-common")
local available=$(get_os_codename_install_source "$install_src")
# only ever move forward; downgrades are never attempted
if dpkg --compare-versions $(get_os_version_codename "$cur") lt \
$(get_os_version_codename "$available") ; then
juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available."
# need to explicitly upgrade ksc b/c (LP: 1182689)
do_openstack_upgrade "$install_src" $(determine_packages) python-keystoneclient
fi
configure_https
# Save our scriptrc env variables for health checks
declare -a env_vars=(
"OPENSTACK_PORT_MCASTPORT=$(config-get ha-mcastport)"
'OPENSTACK_SERVICE_API=cinder-api'
'OPENSTACK_SERVICE_SCHEDULER=cinder-scheduler'
'OPENSTACK_SERVICE_VOLUME=cinder-volume')
save_script_rc ${env_vars[@]}
}
function image-service_changed {
# Point cinder at the glance API server advertised on the image-service
# relation; retried on next hook run if the remote side isn't ready.
GLANCE_API_SERVER=`relation-get glance-api-server`
if [[ -z $GLANCE_API_SERVER ]] ; then
echo "image-service_changed: GLANCE_API_SERVER not yet set. Exit 0 and retry"
exit 0
fi
set_or_update glance_api_servers $GLANCE_API_SERVER
# qemu-utils provides image conversion needed for bootable volumes
apt-get -y install qemu-utils
cinder_ctl all restart
}
arg0=$(basename $0) arg0=$(basename $0)
juju-log "cinder: Attempting to fire hook for: $arg0" juju-log "cinder: Attempting to fire hook for: $arg0"
case $arg0 in case $arg0 in
@@ -184,5 +332,12 @@ case $arg0 in
"ceph-relation-changed") ceph_changed;; "ceph-relation-changed") ceph_changed;;
"cinder-volume-service-relation-joined") exit 0 ;; "cinder-volume-service-relation-joined") exit 0 ;;
"cinder-volume-service-relation-changed") exit 0 ;; "cinder-volume-service-relation-changed") exit 0 ;;
"cluster-relation-changed") cluster_changed ;;
"cluster-relation-departed") cluster_changed ;;
"image-service-relation-changed") image-service_changed ;;
"ha-relation-joined") ha_relation_joined ;;
"ha-relation-changed") ha_relation_changed ;;
"upgrade-charm") upgrade_charm ;;
"config-changed") config_changed ;;
*) exit 0 *) exit 0
esac esac

View File

@@ -0,0 +1 @@
cinder-hooks

View File

@@ -0,0 +1 @@
cinder-hooks

1
hooks/config-changed Symbolic link
View File

@@ -0,0 +1 @@
cinder-hooks

1
hooks/ha-relation-changed Symbolic link
View File

@@ -0,0 +1 @@
cinder-hooks

1
hooks/ha-relation-joined Symbolic link
View File

@@ -0,0 +1 @@
cinder-hooks

View File

@@ -0,0 +1 @@
cinder-hooks

View File

@@ -20,6 +20,9 @@ function service_ctl_status {
function service_ctl { function service_ctl {
# control a specific service, or all (as defined by $SERVICES) # control a specific service, or all (as defined by $SERVICES)
# service restarts will only occur depending on global $CONFIG_CHANGED,
# which should be updated in charm's set_or_update().
local config_changed=${CONFIG_CHANGED:-True}
if [[ $1 == "all" ]] ; then if [[ $1 == "all" ]] ; then
ctl="$SERVICES" ctl="$SERVICES"
else else
@@ -37,12 +40,21 @@ function service_ctl {
"stop") "stop")
service_ctl_status $i && service $i stop || return 0 ;; service_ctl_status $i && service $i stop || return 0 ;;
"restart") "restart")
service_ctl_status $i && service $i restart || service $i start ;; if [[ "$config_changed" == "True" ]] ; then
service_ctl_status $i && service $i restart || service $i start
fi
;;
esac esac
if [[ $? != 0 ]] ; then if [[ $? != 0 ]] ; then
juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action" juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action"
fi fi
done done
# all configs should have been reloaded on restart of all services, reset
# flag if its being used.
if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] &&
[[ "$ctl" == "all" ]]; then
CONFIG_CHANGED="False"
fi
} }
function configure_install_source { function configure_install_source {
@@ -165,8 +177,9 @@ get_os_codename_install_source() {
fi fi
# have a guess based on the deb string provided # have a guess based on the deb string provided
if [[ "${rel:0:3}" == "deb" ]]; then if [[ "${rel:0:3}" == "deb" ]] || \
CODENAMES="diablo essex folsom grizzly" [[ "${rel:0:3}" == "ppa" ]] ; then
CODENAMES="diablo essex folsom grizzly havana"
for cname in $CODENAMES; do for cname in $CODENAMES; do
if echo $rel | grep -q $cname; then if echo $rel | grep -q $cname; then
codename=$cname codename=$cname
@@ -178,11 +191,13 @@ get_os_codename_install_source() {
get_os_codename_package() { get_os_codename_package() {
local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none" local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none"
pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs
case "${pkg_vers:0:6}" in case "${pkg_vers:0:6}" in
"2011.2") echo "diablo" ;; "2011.2") echo "diablo" ;;
"2012.1") echo "essex" ;; "2012.1") echo "essex" ;;
"2012.2") echo "folsom" ;; "2012.2") echo "folsom" ;;
"2013.1") echo "grizzly" ;; "2013.1") echo "grizzly" ;;
"2013.2") echo "havana" ;;
esac esac
} }
@@ -191,7 +206,8 @@ get_os_version_codename() {
"diablo") echo "2011.2" ;; "diablo") echo "2011.2" ;;
"essex") echo "2012.1" ;; "essex") echo "2012.1" ;;
"folsom") echo "2012.2" ;; "folsom") echo "2012.2" ;;
"grizzly") echo "2012.3" ;; "grizzly") echo "2013.1" ;;
"havana") echo "2013.2" ;;
esac esac
} }
@@ -314,3 +330,452 @@ function get_block_device() {
echo "$found" echo "$found"
return 0 return 0
} }
HAPROXY_CFG=/etc/haproxy/haproxy.cfg
HAPROXY_DEFAULT=/etc/default/haproxy
##########################################################################
# Description: Configures HAProxy services for Openstack API's
# Parameters:
# Space delimited list of service:haproxy_port:api_port:mode combinations
# for which haproxy service configuration should be generated. The
# function assumes the name of the peer relation is 'cluster' and that
# every service unit in the peer relation is running the same services.
#
# Services that do not specify :mode in parameter will default to http.
#
# Example
# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http
##########################################################################
configure_haproxy() {
local address=`unit-get private-address`
# juju unit name with '/' flattened to '-', used as the server label
local name=${JUJU_UNIT_NAME////-}
cat > $HAPROXY_CFG << EOF
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 20000
user haproxy
group haproxy
spread-checks 0
defaults
log global
mode http
option httplog
option dontlognull
retries 3
timeout queue 1000
timeout connect 1000
timeout client 30000
timeout server 30000
listen stats :8888
mode http
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password
EOF
# one listen stanza per requested service, balancing across this unit
# and every peer on the 'cluster' relation
for service in $@; do
local service_name=$(echo $service | cut -d : -f 1)
local haproxy_listen_port=$(echo $service | cut -d : -f 2)
local api_listen_port=$(echo $service | cut -d : -f 3)
local mode=$(echo $service | cut -d : -f 4)
[[ -z "$mode" ]] && mode="http"
juju-log "Adding haproxy configuration entry for $service "\
"($haproxy_listen_port -> $api_listen_port)"
cat >> $HAPROXY_CFG << EOF
listen $service_name 0.0.0.0:$haproxy_listen_port
balance roundrobin
mode $mode
option ${mode}log
server $name $address:$api_listen_port check
EOF
local r_id=""
local unit=""
for r_id in `relation-ids cluster`; do
for unit in `relation-list -r $r_id`; do
local unit_name=${unit////-}
local unit_address=`relation-get -r $r_id private-address $unit`
if [ -n "$unit_address" ]; then
echo "        server $unit_name $unit_address:$api_listen_port check" \
>> $HAPROXY_CFG
fi
done
done
done
# enable the service in /etc/default so 'service haproxy start' works
echo "ENABLED=1" > $HAPROXY_DEFAULT
service haproxy restart
}
##########################################################################
# Description: Query HA interface to determine if the unit is clustered
# Returns: 0 if configured, 1 if not configured
##########################################################################
is_clustered() {
local r_id=""
local unit=""
# clustered once any unit on an 'ha' relation has published 'clustered'
for r_id in $(relation-ids ha); do
if [ -n "$r_id" ]; then
for unit in $(relation-list -r $r_id); do
clustered=$(relation-get -r $r_id clustered $unit)
if [ -n "$clustered" ]; then
juju-log "Unit is haclustered"
return 0
fi
done
fi
done
juju-log "Unit is not haclustered"
return 1
}
##########################################################################
# Description: Return a list of all peers in cluster relations
# Outputs: space-separated peer unit names on stdout (empty if no peers)
##########################################################################
peer_units() {
local peers=""
local r_id=""
for r_id in $(relation-ids cluster); do
peers="$peers $(relation-list -r $r_id)"
done
echo $peers
}
##########################################################################
# Description: Determines whether the current unit is the oldest of all
#              its peers - supports partial leader election
# Parameters:  Space-separated list of peer unit names (e.g. "cinder/1")
# Returns:     0 if oldest, 1 if not
##########################################################################
oldest_peer() {
  local peers="$1"
  local peer=""
  # unit numbers increase monotonically: lowest number == oldest unit
  local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
  for peer in $peers; do
    # use juju-log (not echo) so diagnostics don't leak onto stdout
    juju-log "Comparing $JUJU_UNIT_NAME with peers: $peers"
    local r_unit_no=$(echo $peer | cut -d / -f 2)
    if (($r_unit_no<$l_unit_no)); then
      juju-log "Not oldest peer; deferring"
      return 1
    fi
  done
  juju-log "Oldest peer; might take charge?"
  return 0
}
##########################################################################
# Description: Determines whether the current service units is the
# leader within a) a cluster of its peers or b) across a
# set of unclustered peers.
# Parameters: CRM resource to check ownership of if clustered
# Returns: 0 if leader, 1 if not
##########################################################################
eligible_leader() {
if is_clustered; then
# clustered: defer to pacemaker's notion of resource ownership
if ! is_leader $1; then
juju-log 'Deferring action to CRM leader'
return 1
fi
else
# unclustered: fall back to oldest-unit election among peers
peers=$(peer_units)
if [ -n "$peers" ] && ! oldest_peer "$peers"; then
juju-log 'Deferring action to oldest service unit.'
return 1
fi
fi
return 0
}
##########################################################################
# Description: Query Cluster peer interface to see if peered
# Returns: 0 if peered, 1 if not peered
##########################################################################
is_peered() {
# NOTE(review): relation-ids may return multiple ids; this only handles
# a single 'cluster' relation id correctly - confirm that is guaranteed.
local r_id=$(relation-ids cluster)
if [ -n "$r_id" ]; then
if [ -n "$(relation-list -r $r_id)" ]; then
juju-log "Unit peered"
return 0
fi
fi
juju-log "Unit not peered"
return 1
}
##########################################################################
# Description: Determines whether host is owner of clustered services
# Parameters: Name of CRM resource to check ownership of
# Returns: 0 if leader, 1 if not leader
##########################################################################
is_leader() {
hostname=`hostname`
if [ -x /usr/sbin/crm ]; then
# NOTE(review): substring match - 'node1' also matches 'node10';
# consider an exact/word match if hostnames can be prefixes of others
if crm resource show $1 | grep -q $hostname; then
juju-log "$hostname is cluster leader."
return 0
fi
fi
juju-log "$hostname is not cluster leader."
return 1
}
##########################################################################
# Description: Determines whether enough data has been provided in
# configuration or relation data to configure HTTPS.
# Parameters: None
# Returns: 0 if HTTPS can be configured, 1 if not.
##########################################################################
https() {
local r_id=""
# locally-configured certificates take precedence over keystone's
if [[ -n "$(config-get ssl_cert)" ]] &&
[[ -n "$(config-get ssl_key)" ]] ; then
return 0
fi
# otherwise require the full set of SSL material from keystone
for r_id in $(relation-ids identity-service) ; do
for unit in $(relation-list -r $r_id) ; do
if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] &&
[[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] &&
[[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] &&
[[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then
return 0
fi
done
done
return 1
}
##########################################################################
# Description: For a given number of port mappings, configures apache2
# HTTPS local reverse proxying using certificates and keys provided in
# either configuration data (preferred) or relation data. Assumes ports
# are not in use (calling charm should ensure that).
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if reverse proxy(s) have been configured, 1 if not.
##########################################################################
enable_https() {
local port_maps="$@"
local http_restart=""
juju-log "Enabling HTTPS for port mappings: $port_maps."
# allow overriding of keystone provided certs with those set manually
# in config.
local cert=$(config-get ssl_cert)
local key=$(config-get ssl_key)
local ca_cert=""
if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
juju-log "Inspecting identity-service relations for SSL certificate."
local r_id=""
cert=""
key=""
ca_cert=""
for r_id in $(relation-ids identity-service) ; do
for unit in $(relation-list -r $r_id) ; do
[[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)"
[[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)"
[[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)"
done
done
# relation data is base64-encoded; decode before writing to disk
[[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
[[ -n "$key" ]] && key=$(echo $key | base64 -di)
[[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
else
juju-log "Using SSL certificate provided in service config."
fi
[[ -z "$cert" ]] || [[ -z "$key" ]] &&
juju-log "Expected but could not find SSL certificate data, not "\
"configuring HTTPS!" && return 1
apt-get -y install apache2
# grep filters a2enmod's no-op chatter; remaining output means modules
# were newly enabled, so schedule an apache restart
a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
http_restart=1
mkdir -p /etc/apache2/ssl/$CHARM
echo "$cert" >/etc/apache2/ssl/$CHARM/cert
echo "$key" >/etc/apache2/ssl/$CHARM/key
if [[ -n "$ca_cert" ]] ; then
juju-log "Installing Keystone supplied CA cert."
echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
update-ca-certificates --fresh
# XXX TODO: Find a better way of exporting this?
if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
[[ -e /var/www/keystone_juju_ca_cert.crt ]] &&
rm -rf /var/www/keystone_juju_ca_cert.crt
ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \
/var/www/keystone_juju_ca_cert.crt
fi
fi
# one SSL vhost per port mapping, terminating TLS on the external port
# and proxying plaintext to the internal port on localhost
for port_map in $port_maps ; do
local ext_port=$(echo $port_map | cut -d: -f1)
local int_port=$(echo $port_map | cut -d: -f2)
juju-log "Creating apache2 reverse proxy vhost for $port_map."
cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
Listen $ext_port
NameVirtualHost *:$ext_port
<VirtualHost *:$ext_port>
ServerName $(unit-get private-address)
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
ProxyPass / http://localhost:$int_port/
ProxyPassReverse / http://localhost:$int_port/
ProxyPreserveHost on
</VirtualHost>
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
END
a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
http_restart=1
done
if [[ -n "$http_restart" ]] ; then
service apache2 restart
fi
}
##########################################################################
# Description: Ensure HTTPS reverse proxying is disabled for given port
# mappings.
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
##########################################################################
disable_https() {
local port_maps="$@"
local http_restart=""
juju-log "Ensuring HTTPS disabled for $port_maps."
# nothing to do if apache was never configured for this charm
( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0
for port_map in $port_maps ; do
local ext_port=$(echo $port_map | cut -d: -f1)
local int_port=$(echo $port_map | cut -d: -f2)
if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then
juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map."
a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
http_restart=1
fi
done
if [[ -n "$http_restart" ]] ; then
service apache2 restart
fi
}
##########################################################################
# Description: Ensures HTTPS is either enabled or disabled for given port
# mapping.
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
##########################################################################
setup_https() {
# configure https via apache reverse proxying either
# using certs provided by config or keystone.
[[ -z "$CHARM" ]] &&
error_out "setup_https(): CHARM not set."
# https() decides whether enough SSL material exists to proxy
if ! https ; then
disable_https $@
else
enable_https $@
fi
}
##########################################################################
# Description: Determine the port the local API server should listen on,
# accounting for an HTTPS reverse proxy and/or haproxy in front of it.
# Paremeters: The standard public port for given service.
# Returns: Prints the correct listening port for the API service.
##########################################################################
determine_api_port() {
  local standard_port="$1"
  local layers=0
  # each proxy layer in front of the API shifts the backend down 10 ports
  if [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ; then
    layers=$((layers + 1))
  fi
  if https >/dev/null 2>&1 ; then
    layers=$((layers + 1))
  fi
  echo $((standard_port - layers * 10))
}
##########################################################################
# Description: Determine the port haproxy should listen on, accounting
# for an HTTPS reverse proxy bound to the public address.
# Paremeters: The standard public port for given service.
# Returns: Prints the listening port for the haproxy service.
##########################################################################
determine_haproxy_port() {
  local standard_port="$1"
  local layers=0
  # https termination in front of haproxy pushes it down 10 ports
  https >/dev/null 2>&1 && layers=$((layers + 1))
  echo $((standard_port - layers * 10))
}
##########################################################################
# Description: Print the value for a given config option in an OpenStack
# .ini style configuration file.
# Parameters: File path, option to retrieve, optional
# section name (default=DEFAULT)
# Returns: Prints value if set, prints nothing otherwise.
##########################################################################
local_config_get() {
# return config values set in openstack .ini config files.
# default placeholders starting (eg, %AUTH_HOST%) treated as
# unset values.
# NOTE(review): relies on python 2 ('python' binary + ConfigParser
# module); will need porting for python-3-only environments.
local file="$1"
local option="$2"
local section="$3"
[[ -z "$section" ]] && section="DEFAULT"
python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$file')
try:
value = config.get('$section', '$option')
except:
print ''
exit(0)
if value.startswith('%'): exit(0)
print value
"
}
##########################################################################
# Description: Creates an rc file exporting environment variables to a
# script_path local to the charm's installed directory.
# Any charm scripts run outside the juju hook environment can source this
# scriptrc to obtain updated config information necessary to perform health
# checks or service changes
#
# Parameters:
#   An array of '=' delimited ENV_VAR:value combinations to export.
#   If optional script_path key is not provided in the array, script_path
#   defaults to scripts/scriptrc
##########################################################################
function save_script_rc {
  if [[ -z "$JUJU_UNIT_NAME" ]]; then
    echo "Error: Missing JUJU_UNIT_NAME environment variable"
    exit 1
  fi
  # our default unit_path (left global on purpose: callers may inspect it)
  unit_path="$CHARM_DIR/scripts/scriptrc"
  echo "$unit_path"
  # Stage in /tmp, keyed on the unit name ('/' -> '-'), then move into
  # place so readers never see a half-written rc file.
  tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc"
  echo "#!/bin/bash" > "$tmp_rc"
  local env_var
  for env_var in "$@"; do
    if [[ "$env_var" == *script_path* ]]; then
      # well then we need to reset the new unit-local script path
      unit_path="$CHARM_DIR/${env_var/script_path=/}"
    else
      echo "export $env_var" >> "$tmp_rc"
    fi
  done
  chmod 755 "$tmp_rc"
  mv "$tmp_rc" "$unit_path"
}

1
hooks/upgrade-charm Symbolic link
View File

@@ -0,0 +1 @@
cinder-hooks

View File

@@ -15,3 +15,11 @@ requires:
interface: keystone interface: keystone
ceph: ceph:
interface: ceph-client interface: ceph-client
image-service:
interface: glance
ha:
interface: hacluster
scope: container
peers:
cluster:
interface: cinder-ha

View File

@@ -1 +1 @@
5 27

13
scripts/add_to_cluster Executable file
View File

@@ -0,0 +1,13 @@
#!/bin/bash
# Rejoin this unit to the corosync/pacemaker HA cluster and block until
# the cluster reports it online.
# corosync may already be running; ignore a failed start.
service corosync start || /bin/true
sleep 2
# pacemaker can refuse to start until corosync membership is up; retry.
while ! service pacemaker start; do
echo "Attempting to start pacemaker"
sleep 1;
done;
# Take this node out of standby, then poll 'crm status' until nothing
# is still reported as Stopped.
crm node online
sleep 2
while crm status | egrep -q 'Stopped$'; do
echo "Waiting for nodes to come online"
sleep 1
done

4
scripts/remove_from_cluster Executable file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Gracefully remove this unit from the HA cluster: put the node in
# standby so pacemaker migrates its resources away, then stop the
# cluster services (pacemaker before corosync, reverse of startup).
crm node standby
service pacemaker stop
service corosync stop