#!/bin/bash -e
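
# glance charm hook script. All charm hooks are implemented in this single
# file, which is expected to be invoked under each hook's name (e.g. via
# per-hook symlinks); the case statement at the bottom dispatches on the
# invoked name ($ARG0).
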
HOOKS_DIR="$CHARM_DIR/hooks"
ARG0=${0##*/}

if [[ -e $HOOKS_DIR/glance-common ]] ; then
  . $HOOKS_DIR/glance-common
else
  echo "ERROR: Could not load glance-common from $HOOKS_DIR"
  exit 1
fi

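# install hook: configure the package source from openstack-origin, install
# $PACKAGES, stop the services, enable verbose/debug logging for the api and
# registry, and configure HTTPS if requested.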
function install_hook {
  juju-log "Installing glance packages"
  apt-get -y install python-software-properties || exit 1

  configure_install_source "$(config-get openstack-origin)"

  apt-get update || exit 1
  apt-get -y install $PACKAGES || exit 1

  service_ctl all stop

  # TODO: Make debug logging a config option.
  set_or_update verbose True api
  set_or_update debug True api
  set_or_update verbose True registry
  set_or_update debug True registry

  configure_https
}

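# shared-db joined: request access to the configured glance database for the
# configured user at this unit's private address.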
function db_joined {
  local glance_db=$(config-get glance-db)
  local db_user=$(config-get db-user)
  local hostname=$(unit-get private-address)
  juju-log "$CHARM - db_joined: requesting database access to $glance_db for "\
           "$db_user@$hostname"
  relation-set database=$glance_db username=$db_user hostname=$hostname
}

function db_changed {
  # serves as the main shared-db changed hook, but may also be called with a
  # relation-id to configure new config files for existing relations.
  local r_id="$1"
  local r_args=""
  if [[ -n "$r_id" ]] ; then
    # set up environment for an existing relation to a single unit.
    export JUJU_REMOTE_UNIT=$(relation-list -r $r_id | head -n1)
    export JUJU_RELATION="shared-db"
    export JUJU_RELATION_ID="$r_id"
    local r_args="-r $JUJU_RELATION_ID"
    juju-log "$CHARM - db_changed: Running hook for existing relation to "\
             "$JUJU_REMOTE_UNIT-$JUJU_RELATION_ID"
  fi

  local db_host=$(relation-get $r_args db_host)
  local db_password=$(relation-get $r_args password)

  if [[ -z "$db_host" ]] || [[ -z "$db_password" ]] ; then
    juju-log "$CHARM - db_changed: db_host or db_password not yet set, will retry."
    exit 0
  fi

  local glance_db=$(config-get glance-db)
  local db_user=$(config-get db-user)
  local rel=$(get_os_codename_package glance-common)

  if [[ -n "$r_id" ]] ; then
    unset JUJU_REMOTE_UNIT JUJU_RELATION JUJU_RELATION_ID
  fi

  juju-log "$CHARM - db_changed: Configuring glance.conf for access to $glance_db"

  set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$glance_db" registry

  # since folsom, a db connection setting in glance-api.conf is required.
  [[ "$rel" != "essex" ]] &&
    set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$glance_db" api

  if eligible_leader 'res_glance_vip'; then
    if [[ "$rel" == "essex" ]] ; then
      # Essex required initializing new databases to version 0
      if ! glance-manage db_version >/dev/null 2>&1; then
        juju-log "Setting glance database version to 0"
        glance-manage version_control 0
      fi
    fi
    juju-log "$CHARM - db_changed: Running database migrations for $rel."
    glance-manage db_sync
  fi
  service_ctl all restart
}

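# image-service joined: if this unit is the eligible leader, advertise the
# glance API endpoint (the cluster VIP when clustered) to the remote service.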
function image-service_joined {
  # Check to see if unit is potential leader
  local r_id="$1"
  [[ -n "$r_id" ]] && r_id="-r $r_id"
  eligible_leader 'res_glance_vip' || return 0
  https && scheme="https" || scheme="http"
  is_clustered && local host=$(config-get vip) ||
    local host=$(unit-get private-address)
  url="$scheme://$host:9292"
  juju-log "glance: image-service_joined: To peer glance-api-server=$url"
  relation-set $r_id glance-api-server=$url
}

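# object-store joined: switch glance-api's default store to swift, using the
# service credentials obtained from the identity-service relation; deferred
# until such a relation exists.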
function object-store_joined {
  local relids="$(relation-ids identity-service)"
  [[ -z "$relids" ]] && \
    juju-log "$CHARM: Deferring swift store configuration until " \
             "an identity-service relation exists." && exit 0

  set_or_update default_store swift api
  set_or_update swift_store_create_container_on_put true api

  for relid in $relids ; do
    local unit=$(relation-list -r $relid)
    local svc_tenant=$(relation-get -r $relid service_tenant $unit)
    local svc_username=$(relation-get -r $relid service_username $unit)
    local svc_password=$(relation-get -r $relid service_password $unit)
    local auth_host=$(relation-get -r $relid private-address $unit)
    local port=$(relation-get -r $relid service_port $unit)
    local auth_url=""

    [[ -n "$auth_host" ]] && [[ -n "$port" ]] &&
      auth_url="http://$auth_host:$port/v2.0/"

    [[ -n "$svc_tenant" ]] && [[ -n "$svc_username" ]] &&
      set_or_update swift_store_user "$svc_tenant:$svc_username" api
    [[ -n "$svc_password" ]] &&
      set_or_update swift_store_key "$svc_password" api
    [[ -n "$auth_url" ]] &&
      set_or_update swift_store_auth_address "$auth_url" api
  done
  service_ctl glance-api restart
}

function object-store_changed {
  exit 0
}

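# ceph hooks: install the ceph client packages, write the client keyring and a
# minimal ceph.conf, create the 'images' pool if it is missing, and point
# glance-api at rbd storage.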
function ceph_joined {
  mkdir -p /etc/ceph
  apt-get -y install ceph-common python-ceph || exit 1
}

function ceph_changed {
  local r_id="$1"
  local unit_id="$2"
  local r_arg=""
  [[ -n "$r_id" ]] && r_arg="-r $r_id"
  SERVICE_NAME=`echo $JUJU_UNIT_NAME | cut -d / -f 1`
  KEYRING=/etc/ceph/ceph.client.$SERVICE_NAME.keyring
  KEY=`relation-get $r_arg key $unit_id`
  if [ -n "$KEY" ]; then
    # But only once
    if [ ! -f $KEYRING ]; then
      ceph-authtool $KEYRING \
        --create-keyring --name=client.$SERVICE_NAME \
        --add-key="$KEY"
      chmod +r $KEYRING
    fi
  else
    # No key - bail for the time being
    exit 0
  fi

  MONS=`relation-list $r_arg`
  mon_hosts=""
  for mon in $MONS; do
    mon_hosts="$mon_hosts $(get_ip $(relation-get $r_arg private-address $mon)):6789,"
  done
  cat > /etc/ceph/ceph.conf << EOF
[global]
auth supported = $(relation-get $r_arg auth $unit_id)
keyring = /etc/ceph/\$cluster.\$name.keyring
mon host = $mon_hosts
EOF

  # Create the images pool if it does not already exist
  if ! rados --id $SERVICE_NAME lspools | grep -q images; then
    rados --id $SERVICE_NAME mkpool images
  fi

  # Configure glance for ceph storage options
  set_or_update default_store rbd api
  set_or_update rbd_store_ceph_conf /etc/ceph/ceph.conf api
  set_or_update rbd_store_user $SERVICE_NAME api
  set_or_update rbd_store_pool images api
  set_or_update rbd_store_chunk_size 8 api
  service_ctl glance-api restart
}

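# identity-service joined: if this unit is the eligible leader, register the
# glance API endpoint (public/admin/internal URLs) with keystone.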
function keystone_joined {
  # Leadership check
  eligible_leader 'res_glance_vip' || return 0
  local r_id="$1"
  [[ -n "$r_id" ]] && r_id=" -r $r_id"

  # determine correct endpoint URL
  https && scheme="https" || scheme="http"
  is_clustered && local host=$(config-get vip) ||
    local host=$(unit-get private-address)
  url="$scheme://$host:9292"

  # advertise our API endpoint to keystone
  relation-set service="glance" \
    region="$(config-get region)" public_url=$url admin_url=$url internal_url=$url
}

function keystone_changed {
  # serves as the main identity-service changed hook, but may also be called
  # with a relation-id to configure new config files for existing relations.
  local r_id="$1"
  local r_args=""
  if [[ -n "$r_id" ]] ; then
    # set up environment for an existing relation to a single unit.
    export JUJU_REMOTE_UNIT=$(relation-list -r $r_id | head -n1)
    export JUJU_RELATION="identity-service"
    export JUJU_RELATION_ID="$r_id"
    local r_args="-r $JUJU_RELATION_ID"
    juju-log "$CHARM - keystone_changed: Running hook for existing relation to "\
             "$JUJU_REMOTE_UNIT-$JUJU_RELATION_ID"
  fi

  token=$(relation-get $r_args admin_token)
  service_port=$(relation-get $r_args service_port)
  auth_port=$(relation-get $r_args auth_port)
  service_username=$(relation-get $r_args service_username)
  service_password=$(relation-get $r_args service_password)
  service_tenant=$(relation-get $r_args service_tenant)
  [[ -z "$token" ]] || [[ -z "$service_port" ]] || [[ -z "$auth_port" ]] ||
    [[ -z "$service_username" ]] || [[ -z "$service_password" ]] ||
    [[ -z "$service_tenant" ]] && juju-log "keystone_changed: Peer not ready" &&
    exit 0
  [[ "$token" == "-1" ]] &&
    juju-log "keystone_changed: admin token error" && exit 1
  juju-log "keystone_changed: Acquired admin token"
  keystone_host=$(relation-get $r_args auth_host)

  if [[ -n "$r_id" ]] ; then
    unset JUJU_REMOTE_UNIT JUJU_RELATION JUJU_RELATION_ID
  fi

  set_or_update "flavor" "keystone" "api" "paste_deploy"
  set_or_update "flavor" "keystone" "registry" "paste_deploy"

  local sect="filter:authtoken"
  for i in api-paste registry-paste ; do
    set_or_update "service_host" "$keystone_host" $i $sect
    set_or_update "service_port" "$service_port" $i $sect
    set_or_update "auth_host" "$keystone_host" $i $sect
    set_or_update "auth_port" "$auth_port" $i $sect
    set_or_update "auth_uri" "http://$keystone_host:$service_port/" $i $sect
    set_or_update "admin_token" "$token" $i $sect
    set_or_update "admin_tenant_name" "$service_tenant" $i $sect
    set_or_update "admin_user" "$service_username" $i $sect
    set_or_update "admin_password" "$service_password" $i $sect
  done
  service_ctl all restart

  # Configure any object-store / swift relations now that we have an
  # identity-service
  if [[ -n "$(relation-ids object-store)" ]] ; then
    object-store_joined
  fi

  # possibly configure HTTPS for API and registry
  configure_https
}

function config_changed() {
  # Determine whether or not we should do an upgrade, based on whether or not
  # the version offered in openstack-origin is greater than what is installed.

  local install_src=$(config-get openstack-origin)
  local cur=$(get_os_codename_package "glance-common")
  local available=$(get_os_codename_install_source "$install_src")

  if [[ "$available" != "unknown" ]] ; then
    if dpkg --compare-versions $(get_os_version_codename "$cur") lt \
        $(get_os_version_codename "$available") ; then
      juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available."
      do_openstack_upgrade "$install_src" $PACKAGES
    fi
  fi
  configure_https
  service_ctl all restart

  # Save our scriptrc env variables for health checks
  declare -a env_vars=(
    "OPENSTACK_PORT_MCASTPORT=$(config-get ha-mcastport)"
    'OPENSTACK_SERVICE_API=glance-api'
    'OPENSTACK_SERVICE_REGISTRY=glance-registry')
  save_script_rc ${env_vars[@]}
}

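# NOTE: cluster_changed, upgrade_charm, ha_relation_joined and
# ha_relation_changed are each defined twice below; bash uses the later
# (second) definitions.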
function cluster_changed() {
  configure_haproxy "glance_api:9292"
}

function upgrade_charm() {
  cluster_changed
}

function ha_relation_joined() {
  local corosync_bindiface=`config-get ha-bindiface`
  local corosync_mcastport=`config-get ha-mcastport`
  local vip=`config-get vip`
  local vip_iface=`config-get vip_iface`
  local vip_cidr=`config-get vip_cidr`
  if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
     [ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
     [ -n "$corosync_mcastport" ]; then
    # TODO: This feels horrible but the data required by the hacluster
    # charm is quite complex and is python ast parsed.
    resources="{
        'res_glance_vip':'ocf:heartbeat:IPaddr2',
        'res_glance_haproxy':'lsb:haproxy'
    }"
    resource_params="{
        'res_glance_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
        'res_glance_haproxy': 'op monitor interval=\"5s\"'
    }"
    init_services="{
        'res_glance_haproxy':'haproxy'
    }"
    groups="{
        'grp_glance_haproxy':'res_glance_vip res_glance_haproxy'
    }"
    relation-set corosync_bindiface=$corosync_bindiface \
      corosync_mcastport=$corosync_mcastport \
      resources="$resources" resource_params="$resource_params" \
      init_services="$init_services" groups="$groups"
  else
    juju-log "Insufficient configuration data to configure hacluster"
    exit 1
  fi
}

function ha_relation_changed() {
  local clustered=`relation-get clustered`
  if [ -n "$clustered" ] && is_leader 'res_glance_vip'; then
    local port=$((9292 + 10000))
    local host=$(config-get vip)
    local url="http://$host:$port"
    for r_id in `relation-ids identity-service`; do
      relation-set -r $r_id service="glance" \
        region="$(config-get region)" \
        public_url="$url" admin_url="$url" internal_url="$url"
    done
    for r_id in `relation-ids image-service`; do
      relation-set -r $r_id \
        glance-api-server="$host:$port"
    done
  fi
}

function cluster_changed() {
  [[ -z "$(peer_units)" ]] &&
    juju-log "cluster_changed() with no peers." && exit 0
  local haproxy_port=$(determine_haproxy_port 9292)
  local backend_port=$(determine_api_port 9292)
  service glance-api stop
  configure_haproxy "glance_api:$haproxy_port:$backend_port"
  set_or_update bind_port "$backend_port" "api"
  service glance-api start
}

function upgrade_charm() {
  cluster_changed
}

function ha_relation_joined() {
  local corosync_bindiface=`config-get ha-bindiface`
  local corosync_mcastport=`config-get ha-mcastport`
  local vip=`config-get vip`
  local vip_iface=`config-get vip_iface`
  local vip_cidr=`config-get vip_cidr`
  if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
     [ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
     [ -n "$corosync_mcastport" ]; then
    # TODO: This feels horrible but the data required by the hacluster
    # charm is quite complex and is python ast parsed.
    resources="{
        'res_glance_vip':'ocf:heartbeat:IPaddr2',
        'res_glance_haproxy':'lsb:haproxy'
    }"
    resource_params="{
        'res_glance_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
        'res_glance_haproxy': 'op monitor interval=\"5s\"'
    }"
    init_services="{
        'res_glance_haproxy':'haproxy'
    }"
    clones="{
        'cl_glance_haproxy': 'res_glance_haproxy'
    }"
    relation-set corosync_bindiface=$corosync_bindiface \
      corosync_mcastport=$corosync_mcastport \
      resources="$resources" resource_params="$resource_params" \
      init_services="$init_services" clones="$clones"
  else
    juju-log "Insufficient configuration data to configure hacluster"
    exit 1
  fi
}

function ha_relation_changed() {
  local clustered=`relation-get clustered`
  if [ -n "$clustered" ] && is_leader 'res_glance_vip'; then
    local host=$(config-get vip)
    https && local scheme="https" || local scheme="http"
    local url="$scheme://$host:9292"

    for r_id in `relation-ids identity-service`; do
      relation-set -r $r_id service="glance" \
        region="$(config-get region)" \
        public_url="$url" admin_url="$url" internal_url="$url"
    done
    for r_id in `relation-ids image-service`; do
      relation-set -r $r_id \
        glance-api-server="$scheme://$host:9292"
    done
  fi
}

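# Hook dispatch: run the handler matching the invoked hook name.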
case $ARG0 in
  "start"|"stop") service_ctl all $ARG0 ;;
  "install") install_hook ;;
  "config-changed") config_changed ;;
  "shared-db-relation-joined") db_joined ;;
  "shared-db-relation-changed") db_changed ;;
  "image-service-relation-joined") image-service_joined ;;
  "image-service-relation-changed") exit 0 ;;
  "object-store-relation-joined") object-store_joined ;;
  "object-store-relation-changed") object-store_changed ;;
  "identity-service-relation-joined") keystone_joined ;;
  "identity-service-relation-changed") keystone_changed ;;
  "ceph-relation-joined") ceph_joined ;;
  "ceph-relation-changed") ceph_changed ;;
  "cluster-relation-changed") cluster_changed ;;
  "cluster-relation-departed") cluster_changed ;;
  "ha-relation-joined") ha_relation_joined ;;
  "ha-relation-changed") ha_relation_changed ;;
  "upgrade-charm") upgrade_charm ;;
esac