charm-keystone/hooks/keystone_hooks.py

#!/usr/bin/python
import time
import urlparse
from base64 import b64encode
from keystone_utils import (
    config_dirty,
    config_get,
    execute,
    update_config_block,
    set_admin_token,
    ensure_initial_admin,
    create_service_entry,
    create_endpoint_template,
    create_role,
    get_admin_token,
    get_service_password,
    create_user,
    grant_role,
    get_ca,
    synchronize_service_credentials,
    do_openstack_upgrade,
    configure_pki_tokens,
    SSH_USER,
    SSL_DIR,
    CLUSTER_RES,
    https
    )
from lib.openstack_common import (
    get_os_codename_install_source,
    get_os_codename_package,
    get_os_version_codename,
    get_os_version_package,
    save_script_rc
    )
import lib.unison as unison
import lib.utils as utils
import lib.cluster_utils as cluster
import lib.haproxy_utils as haproxy
from charmhelpers.payload.execd import execd_preinstall
config = config_get()

packages = [
    "keystone", "python-mysqldb", "pwgen",
    "haproxy", "python-jinja2", "openssl", "unison",
    "python-sqlalchemy"
    ]

service = "keystone"

# used to verify joined services are valid openstack components.
# this should reflect the current "core" components of openstack
# and be expanded as we add support for them as a distro.
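# Each entry maps an advertised service name to the catalog service 'type'
# and description that add_endpoint() registers with keystone.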
valid_services = {
    "nova": {
        "type": "compute",
        "desc": "Nova Compute Service"
    },
    "nova-volume": {
        "type": "volume",
        "desc": "Nova Volume Service"
    },
    "cinder": {
        "type": "volume",
        "desc": "Cinder Volume Service"
    },
    "ec2": {
        "type": "ec2",
        "desc": "EC2 Compatibility Layer"
    },
    "glance": {
        "type": "image",
        "desc": "Glance Image Service"
    },
    "s3": {
        "type": "s3",
        "desc": "S3 Compatible object-store"
    },
    "swift": {
        "type": "object-store",
        "desc": "Swift Object Storage Service"
    },
    "quantum": {
        "type": "network",
        "desc": "Quantum Networking Service"
    },
    "oxygen": {
        "type": "oxygen",
        "desc": "Oxygen Cloud Image Service"
    },
    "ceilometer": {
        "type": "metering",
        "desc": "Ceilometer Metering Service"
    },
    "heat": {
        "type": "orchestration",
        "desc": "Heat Orchestration API"
    },
    "heat-cfn": {
        "type": "cloudformation",
        "desc": "Heat CloudFormation API"
    }
}


def install_hook():
    execd_preinstall()
    utils.configure_source()
    utils.install(*packages)

    update_config_block('DEFAULT',
                public_port=cluster.determine_api_port(config["service-port"]))
    update_config_block('DEFAULT',
                admin_port=cluster.determine_api_port(config["admin-port"]))
    update_config_block('DEFAULT', use_syslog=config["use-syslog"])
    set_admin_token(config['admin-token'])

    # set all backends to use sql+sqlite, if they are not already by default
    update_config_block('sql',
                        connection='sqlite:////var/lib/keystone/keystone.db')
    update_config_block('identity',
                        driver='keystone.identity.backends.sql.Identity')
    update_config_block('catalog',
                        driver='keystone.catalog.backends.sql.Catalog')
    update_config_block('token',
                        driver='keystone.token.backends.sql.Token')
    update_config_block('ec2',
                        driver='keystone.contrib.ec2.backends.sql.Ec2')
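    # Note: the sqlite connection above is only a bootstrap default; once the
    # shared-db relation provides credentials, db_changed() rewrites the 'sql'
    # connection string to point at MySQL.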

    utils.stop('keystone')
    execute("keystone-manage db_sync")
    utils.start('keystone')

    # ensure user + permissions for peer relations that
    # may be syncing data there via SSH_USER.
    unison.ensure_user(user=SSH_USER, group='keystone')
    execute("chmod -R g+wrx /var/lib/keystone/")

    time.sleep(5)
    ensure_initial_admin(config)


def db_joined():
    relation_data = {
        "database": config["database"],
        "username": config["database-user"],
        "hostname": config["hostname"]
        }
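    # The shared-db provider is expected to respond with 'password' and
    # 'db_host', which db_changed() consumes below.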
    utils.relation_set(**relation_data)


def db_changed():
    relation_data = utils.relation_get_dict()
    if ('password' not in relation_data or
        'db_host' not in relation_data):
        utils.juju_log('INFO',
                       "db_host or password not set. Peer not ready, exit 0")
        return

    update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
                        (config["database-user"],
                         relation_data["password"],
                         relation_data["db_host"],
                         config["database"]))

    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Cluster leader, performing db-sync')
        execute("keystone-manage db_sync", echo=True)

    if config_dirty():
        utils.restart('keystone')

    time.sleep(5)

    if cluster.eligible_leader(CLUSTER_RES):
        ensure_initial_admin(config)
        # If the backend database has been switched to something new and
        # there are existing identity-service relations, service entries
        # need to be recreated in the new database.  Re-executing
        # identity-service-changed will do this.
        for rid in utils.relation_ids('identity-service'):
            for unit in utils.relation_list(rid=rid):
                utils.juju_log('INFO',
                               "Re-exec'ing identity-service-changed"
                               " for: %s - %s" % (rid, unit))
                identity_changed(relation_id=rid, remote_unit=unit)


def ensure_valid_service(service):
    if service not in valid_services.keys():
        utils.juju_log('WARNING',
                       "Invalid service requested: '%s'" % service)
        utils.relation_set(admin_token=-1)
        return


def add_endpoint(region, service, publicurl, adminurl, internalurl):
    desc = valid_services[service]["desc"]
    service_type = valid_services[service]["type"]
    create_service_entry(service, service_type, desc)
    create_endpoint_template(region=region, service=service,
                             publicurl=publicurl,
                             adminurl=adminurl,
                             internalurl=internalurl)


def identity_joined():
    """ Do nothing until we get information about requested service """
    pass


def get_requested_roles(settings):
    ''' Retrieve any valid requested_roles from dict settings '''
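    # e.g. a related charm may set requested_roles="Member,ResellerAdmin"
    # on the relation, which is returned here as ['Member', 'ResellerAdmin'].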
    if ('requested_roles' in settings and
        settings['requested_roles'] not in ['None', None]):
        return settings['requested_roles'].split(',')
    else:
        return []


def identity_changed(relation_id=None, remote_unit=None):
    """ A service has advertised its API endpoints, create an entry in the
        service catalog.
        Optionally allow this hook to be re-fired for an existing
        relation+unit, for context see db_changed().
    """
    if not cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Deferring identity_changed() to service leader.')
        return

    settings = utils.relation_get_dict(relation_id=relation_id,
                                       remote_unit=remote_unit)

    # the minimum settings needed per endpoint
    single = set(['service', 'region', 'public_url', 'admin_url',
                  'internal_url'])
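    # A relation that advertises exactly these un-prefixed keys is treated as
    # a single endpoint; anything else is parsed as multiple service-prefixed
    # endpoints further below.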
    if single.issubset(settings):
        # other end of relation advertised only one endpoint
        if 'None' in [v for k, v in settings.iteritems()]:
            # Some backend services advertise no endpoint but require a
            # hook execution to update auth strategy.
            relation_data = {}
            # Check if clustered and use vip + haproxy ports if so
            if cluster.is_clustered():
                relation_data["auth_host"] = config['vip']
                relation_data["service_host"] = config['vip']
            else:
                relation_data["auth_host"] = config['hostname']
                relation_data["service_host"] = config['hostname']
            relation_data["auth_port"] = config['admin-port']
            relation_data["service_port"] = config['service-port']
            if config['https-service-endpoints'] in ['True', 'true']:
                # Pass CA cert as client will need it to
                # verify https connections
                ca = get_ca(user=SSH_USER)
                ca_bundle = ca.get_ca_bundle()
                relation_data['https_keystone'] = 'True'
                relation_data['ca_cert'] = b64encode(ca_bundle)
            if relation_id:
                relation_data['rid'] = relation_id
            # Allow the remote service to request creation of any additional
            # roles. Currently used by Horizon.
            for role in get_requested_roles(settings):
                utils.juju_log('INFO',
                               "Creating requested role: %s" % role)
                create_role(role)
            utils.relation_set(**relation_data)
            return
        else:
            ensure_valid_service(settings['service'])
            add_endpoint(region=settings['region'],
                         service=settings['service'],
                         publicurl=settings['public_url'],
                         adminurl=settings['admin_url'],
                         internalurl=settings['internal_url'])
            service_username = settings['service']
            https_cn = urlparse.urlparse(settings['internal_url'])
            https_cn = https_cn.hostname
    else:
        # assemble multiple endpoints from relation data. service name
        # should be prepended to setting name, ie:
        #  relation-set ec2_service=$foo ec2_region=$foo ec2_public_url=$foo
        #  relation-set nova_service=$foo nova_region=$foo nova_public_url=$foo
        # Results in a dict that looks like:
        # { 'ec2': {
        #       'service': $foo
        #       'region': $foo
        #       'public_url': $foo
        #   }
        #   'nova': {
        #       'service': $foo
        #       'region': $foo
        #       'public_url': $foo
        #   }
        # }
        endpoints = {}
        for k, v in settings.iteritems():
            ep = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if ep not in endpoints:
                endpoints[ep] = {}
            endpoints[ep][x] = v
        services = []
        https_cn = None
        for ep in endpoints:
            # weed out any unrelated relation stuff Juju might have added
            # by ensuring each possible endpoint has appropriate fields
            #  ['service', 'region', 'public_url', 'admin_url', 'internal_url']
            if single.issubset(endpoints[ep]):
                ep = endpoints[ep]
                ensure_valid_service(ep['service'])
                add_endpoint(region=ep['region'], service=ep['service'],
                             publicurl=ep['public_url'],
                             adminurl=ep['admin_url'],
                             internalurl=ep['internal_url'])
                services.append(ep['service'])
                if not https_cn:
                    https_cn = urlparse.urlparse(ep['internal_url'])
                    https_cn = https_cn.hostname
        service_username = '_'.join(services)

    if 'None' in [v for k, v in settings.iteritems()]:
        return

    if not service_username:
        return

    token = get_admin_token()
    utils.juju_log('INFO',
                   "Creating service credentials for '%s'" % service_username)
    service_password = get_service_password(service_username)
    create_user(service_username, service_password, config['service-tenant'])
    grant_role(service_username, config['admin-role'],
               config['service-tenant'])

    # Allow the remote service to request creation of any additional roles.
    # Currently used by Swift and Ceilometer.
    for role in get_requested_roles(settings):
        utils.juju_log('INFO',
                       "Creating requested role: %s" % role)
        create_role(role, service_username,
                    config['service-tenant'])

    # As of https://review.openstack.org/#change,4675, all nodes hosting
    # an endpoint need a service username and password assigned to the
    # service tenant and granted admin role.
    # note: config['service-tenant'] is created in utils.ensure_initial_admin()
    # we return a token, information about our API endpoints, and the
    # generated service credentials
    relation_data = {
        "admin_token": token,
        "service_host": config["hostname"],
        "service_port": config["service-port"],
        "auth_host": config["hostname"],
        "auth_port": config["admin-port"],
        "service_username": service_username,
        "service_password": service_password,
        "service_tenant": config['service-tenant'],
        "https_keystone": "False",
        "ssl_cert": "",
        "ssl_key": "",
        "ca_cert": ""
        }

    if relation_id:
        relation_data['rid'] = relation_id

    # Check if clustered and use vip + haproxy ports if so
    if cluster.is_clustered():
        relation_data["auth_host"] = config['vip']
        relation_data["service_host"] = config['vip']

    # generate or get a new cert/key for service if set to manage certs.
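    # The cert, key and CA bundle are base64-encoded onto the relation and
    # the contents of SSL_DIR are synced to peer units, so every unit behind
    # the VIP can present the same certificates.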
    if config['https-service-endpoints'] in ['True', 'true']:
        ca = get_ca(user=SSH_USER)
        cert, key = ca.get_cert_and_key(common_name=https_cn)
        ca_bundle = ca.get_ca_bundle()
        relation_data['ssl_cert'] = b64encode(cert)
        relation_data['ssl_key'] = b64encode(key)
        relation_data['ca_cert'] = b64encode(ca_bundle)
        relation_data['https_keystone'] = 'True'
        unison.sync_to_peers(peer_interface='cluster',
                             paths=[SSL_DIR], user=SSH_USER, verbose=True)

    utils.relation_set(**relation_data)
    synchronize_service_credentials()


def config_changed():
    unison.ensure_user(user=SSH_USER, group='keystone')
    execute("chmod -R g+wrx /var/lib/keystone/")

    # Determine whether or not we should do an upgrade, based on the
    # version offered in openstack-origin.
    available = get_os_codename_install_source(config['openstack-origin'])
    installed = get_os_codename_package('keystone')

    if (available and
        get_os_version_codename(available) >
            get_os_version_codename(installed)):
        # TODO: fixup this call to work like utils.install()
        do_openstack_upgrade(config['openstack-origin'], ' '.join(packages))

    # Ensure keystone group permissions
    execute("chmod -R g+wrx /var/lib/keystone/")

    env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
                'OPENSTACK_PORT_ADMIN': cluster.determine_api_port(
                    config['admin-port']),
                'OPENSTACK_PORT_PUBLIC': cluster.determine_api_port(
                    config['service-port'])}
    save_script_rc(**env_vars)

    set_admin_token(config['admin-token'])

    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Cluster leader - ensuring endpoint configuration'
                       ' is up to date')
        ensure_initial_admin(config)

    update_config_block('logger_root', level=config['log-level'],
                        file='/etc/keystone/logging.conf')
    update_config_block('DEFAULT', use_syslog=config["use-syslog"])

    if get_os_version_package('keystone') >= '2013.1':
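        # note: get_os_version_package() returns a version string such as
        # '2013.1', so this is a lexical comparison of release versions.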
        # PKI introduced in Grizzly
        configure_pki_tokens(config)

    if config_dirty():
        utils.restart('keystone')

    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Firing identity_changed hook'
                       ' for all related services.')
        # HTTPS may have been set - so fire all identity relations
        # again
        for r_id in utils.relation_ids('identity-service'):
            for unit in utils.relation_list(r_id):
                identity_changed(relation_id=r_id,
                                 remote_unit=unit)


def upgrade_charm():
    # Ensure all required packages are installed
    utils.install(*packages)
    cluster_changed()
    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Cluster leader - ensuring endpoint configuration'
                       ' is up to date')
        ensure_initial_admin(config)


def cluster_joined():
    unison.ssh_authorized_peers(user=SSH_USER,
                                group='keystone',
                                peer_interface='cluster',
                                ensure_local_user=True)

    update_config_block('DEFAULT',
                public_port=cluster.determine_api_port(config["service-port"]))
    update_config_block('DEFAULT',
                admin_port=cluster.determine_api_port(config["admin-port"]))
    if config_dirty():
        utils.restart('keystone')
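    # Map each haproxy service to a [frontend port, local API port] pair;
    # haproxy listens on the former and proxies to keystone on the latter.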
    service_ports = {
        "keystone_admin": [
            cluster.determine_haproxy_port(config['admin-port']),
            cluster.determine_api_port(config["admin-port"])
            ],
        "keystone_service": [
            cluster.determine_haproxy_port(config['service-port']),
            cluster.determine_api_port(config["service-port"])
            ]
        }
    haproxy.configure_haproxy(service_ports)


def cluster_changed():
    unison.ssh_authorized_peers(user=SSH_USER,
                                group='keystone',
                                peer_interface='cluster',
                                ensure_local_user=True)
    synchronize_service_credentials()

    service_ports = {
        "keystone_admin": [
            cluster.determine_haproxy_port(config['admin-port']),
            cluster.determine_api_port(config["admin-port"])
            ],
        "keystone_service": [
            cluster.determine_haproxy_port(config['service-port']),
            cluster.determine_api_port(config["service-port"])
            ]
        }
    haproxy.configure_haproxy(service_ports)


def ha_relation_changed():
    relation_data = utils.relation_get_dict()
    if ('clustered' in relation_data and
        cluster.is_leader(CLUSTER_RES)):
        utils.juju_log('INFO',
                       'Cluster configured, notifying other services'
                       ' and updating keystone endpoint configuration')
        # Update keystone endpoint to point at VIP
        ensure_initial_admin(config)
        # Tell all related services to start using
        # the VIP and haproxy ports instead
        for r_id in utils.relation_ids('identity-service'):
            utils.relation_set(rid=r_id,
                               auth_host=config['vip'],
                               service_host=config['vip'])


def ha_relation_joined():
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    corosync_bindiface = config['ha-bindiface']
    corosync_mcastport = config['ha-mcastport']
    vip = config['vip']
    vip_cidr = config['vip_cidr']
    vip_iface = config['vip_iface']

    # Obtain resources
    resources = {
        'res_ks_vip': 'ocf:heartbeat:IPaddr2',
        'res_ks_haproxy': 'lsb:haproxy'
        }
    resource_params = {
        'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                      (vip, vip_cidr, vip_iface),
        'res_ks_haproxy': 'op monitor interval="5s"'
        }
    init_services = {
        'res_ks_haproxy': 'haproxy'
        }
    clones = {
        'cl_ks_haproxy': 'res_ks_haproxy'
        }
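
    # These settings are intended to be consumed over the ha relation by the
    # hacluster subordinate charm, which builds the corresponding
    # corosync/pacemaker resource configuration.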
    utils.relation_set(init_services=init_services,
                       corosync_bindiface=corosync_bindiface,
                       corosync_mcastport=corosync_mcastport,
                       resources=resources,
                       resource_params=resource_params,
                       clones=clones)


hooks = {
    "install": install_hook,
    "shared-db-relation-joined": db_joined,
    "shared-db-relation-changed": db_changed,
    "identity-service-relation-joined": identity_joined,
    "identity-service-relation-changed": identity_changed,
    "config-changed": config_changed,
    "cluster-relation-joined": cluster_joined,
    "cluster-relation-changed": cluster_changed,
    "cluster-relation-departed": cluster_changed,
    "ha-relation-joined": ha_relation_joined,
    "ha-relation-changed": ha_relation_changed,
    "upgrade-charm": upgrade_charm
    }
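
# utils.do_hooks() dispatches to the handler registered above that matches
# the name of the hook being executed.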
utils.do_hooks(hooks)