2011-12-08 09:52:12 -08:00
|
|
|
#!/usr/bin/python
# Keystone charm hooks: install/configure the OpenStack identity service
# and react to Juju relation and configuration events.

import sys
import time

# Charm-local helpers: config_get, execute, relation_* wrappers, juju_log,
# keystone admin helpers, etc. (star-imported by charm convention).
from utils import *
from lib.openstack_common import *

# Charm configuration, read once at import time and shared by every hook.
config = config_get()
|
|
|
|
|
2013-01-07 12:37:48 +00:00
|
|
|
# Packages installed by the install hook; haproxy/python-jinja2 are needed
# for the clustered (HA) deployment path.
packages = "keystone python-mysqldb pwgen haproxy python-jinja2"
service = "keystone"
|
|
|
|
|
|
|
|
# used to verify joined services are valid openstack components.
# this should reflect the current "core" components of openstack
# and be expanded as we add support for them as a distro
# Maps service name -> the catalog "type" and human-readable "desc" used
# when creating its service catalog entry in identity_changed().
valid_services = {
    "nova": {
        "type": "compute",
        "desc": "Nova Compute Service"
    },
    "nova-volume": {
        "type": "volume",
        "desc": "Nova Volume Service"
    },
    "cinder": {
        "type": "volume",
        "desc": "Cinder Volume Service"
    },
    "ec2": {
        "type": "ec2",
        "desc": "EC2 Compatibility Layer"
    },
    "glance": {
        "type": "image",
        "desc": "Glance Image Service"
    },
    "s3": {
        "type": "s3",
        "desc": "S3 Compatible object-store"
    },
    "swift": {
        "type": "object-store",
        "desc": "Swift Object Storage Service"
    },
    "quantum": {
        "type": "network",
        "desc": "Quantum Networking Service"
    },
    "oxygen": {
        "type": "oxygen",
        "desc": "Oxygen Cloud Image Service"
    }
}
|
|
|
|
|
|
|
|
def install_hook():
    """Install keystone and bootstrap the identity service.

    Configures a non-distro apt origin when requested, installs the
    charm's packages, seeds keystone.conf (ports, admin token, sqlite
    backends), syncs the database and creates the initial admin.
    """
    origin = config["openstack-origin"]
    if origin != "distro":
        configure_installation_source(origin)
    execute("apt-get update", die=True)
    execute("apt-get -y install %s" % packages, die=True, echo=True)

    # Bind the public and admin APIs to the configured ports.
    update_config_block('DEFAULT', public_port=config["service-port"])
    update_config_block('DEFAULT', admin_port=config["admin-port"])
    set_admin_token(config['admin-token'])

    # set all backends to use sql+sqlite, if they are not already by default
    sqlite_backends = [
        ('sql', {'connection': 'sqlite:////var/lib/keystone/keystone.db'}),
        ('identity', {'driver': 'keystone.identity.backends.sql.Identity'}),
        ('catalog', {'driver': 'keystone.catalog.backends.sql.Catalog'}),
        ('token', {'driver': 'keystone.token.backends.sql.Token'}),
        ('ec2', {'driver': 'keystone.contrib.ec2.backends.sql.Ec2'}),
    ]
    for section, options in sqlite_backends:
        update_config_block(section, **options)

    # Restart around db_sync, then give keystone a moment to come up
    # before creating the initial admin credentials.
    execute("service keystone stop", echo=True)
    execute("keystone-manage db_sync")
    execute("service keystone start", echo=True)
    time.sleep(5)
    ensure_initial_admin(config)
|
|
|
|
|
|
|
|
def db_joined():
    """Advertise the desired database name, user and host to MySQL."""
    relation_set({
        "database": config["database"],
        "username": config["database-user"],
        "hostname": config["hostname"],
    })
|
|
|
|
|
|
|
|
def db_changed():
    """Point keystone at the shared database once credentials arrive.

    Exits cleanly while the peer is not yet ready, otherwise rewrites the
    sql connection string, re-syncs the schema and re-fires any existing
    identity-service relations so catalog entries land in the new DB.
    """
    relation_data = relation_get_dict()
    ready = ('password' in relation_data and
             'private-address' in relation_data)
    if not ready:
        juju_log("private-address or password not set. Peer not ready, exit 0")
        exit(0)

    connection = "mysql://%s:%s@%s/%s" % (config["database-user"],
                                          relation_data["password"],
                                          relation_data["private-address"],
                                          config["database"])
    update_config_block('sql', connection=connection)

    execute("service keystone stop", echo=True)
    execute("keystone-manage db_sync", echo=True)
    execute("service keystone start")
    time.sleep(5)
    ensure_initial_admin(config)

    # If the backend database has been switched to something new and there
    # are existing identity-service relations, service entries need to be
    # recreated in the new database. Re-executing identity-service-changed
    # will do this.
    for rid in relation_ids(relation_name='identity-service'):
        for unit in relation_list(relation_id=rid):
            juju_log("Re-exec'ing identity-service-changed for: %s - %s" %
                     (rid, unit))
            identity_changed(relation_id=rid, remote_unit=unit)
|
|
|
|
|
2011-12-08 09:52:12 -08:00
|
|
|
def identity_joined():
    """Do nothing until we get information about requested service."""
|
|
|
|
|
2012-09-17 17:39:51 -07:00
|
|
|
def identity_changed(relation_id=None, remote_unit=None):
    """ A service has advertised its API endpoints, create an entry in the
    service catalog.
    Optionally allow this hook to be re-fired for an existing
    relation+unit, for context see db_changed().
    """
    def ensure_valid_service(service):
        # Warn the remote end (admin_token of -1) when it requests a
        # service this charm does not know how to catalog.
        if service not in valid_services.keys():
            juju_log("WARN: Invalid service requested: '%s'" % service)
            # BUG FIX: was misspelled 'realtion_set', which raised a
            # NameError at runtime instead of notifying the remote unit.
            relation_set({ "admin_token": -1 })
            return

    def add_endpoint(region, service, publicurl, adminurl, internalurl):
        # Create the catalog entry and its endpoint template.
        desc = valid_services[service]["desc"]
        service_type = valid_services[service]["type"]
        create_service_entry(service, service_type, desc)
        create_endpoint_template(region=region, service=service,
                                 publicurl=publicurl,
                                 adminurl=adminurl,
                                 internalurl=internalurl)

    if is_clustered() and not is_leader():
        # Only respond if service unit is the leader
        return

    settings = relation_get_dict(relation_id=relation_id,
                                 remote_unit=remote_unit)

    # Allow the remote service to request creation of any additional roles.
    # Currently used by Swift.
    if 'requested_roles' in settings and settings['requested_roles'] != 'None':
        roles = settings['requested_roles'].split(',')
        juju_log("Creating requested roles: %s" % roles)
        for role in roles:
            create_role(role, user=config['admin-user'], tenant='admin')

    # the minimum settings needed per endpoint
    single = set(['service', 'region', 'public_url', 'admin_url',
                  'internal_url'])
    if single.issubset(settings):
        # other end of relation advertised only one endpoint

        if 'None' in [v for k, v in settings.iteritems()]:
            # Some backend services advertise no endpoint but require a
            # hook execution to update auth strategy.
            return

        ensure_valid_service(settings['service'])
        add_endpoint(region=settings['region'], service=settings['service'],
                     publicurl=settings['public_url'],
                     adminurl=settings['admin_url'],
                     internalurl=settings['internal_url'])
        service_username = settings['service']
    else:
        # assemble multiple endpoints from relation data. service name
        # should be prepended to setting name, ie:
        #   relation-set ec2_service=$foo ec2_region=$foo ec2_public_url=$foo
        #   relation-set nova_service=$foo nova_region=$foo nova_public_url=$foo
        # Results in a dict that looks like:
        # { 'ec2': {
        #       'service': $foo
        #       'region': $foo
        #       'public_url': $foo
        #   }
        #   'nova': {
        #       'service': $foo
        #       'region': $foo
        #       'public_url': $foo
        #   }
        # }
        endpoints = {}
        for k, v in settings.iteritems():
            ep = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if ep not in endpoints:
                endpoints[ep] = {}
            endpoints[ep][x] = v
        services = []
        for ep in endpoints:
            # weed out any unrelated relation stuff Juju might have added
            # by ensuring each possible endpoint has appropriate fields
            # ['service', 'region', 'public_url', 'admin_url', 'internal_url']
            if single.issubset(endpoints[ep]):
                ep = endpoints[ep]
                ensure_valid_service(ep['service'])
                add_endpoint(region=ep['region'], service=ep['service'],
                             publicurl=ep['public_url'],
                             adminurl=ep['admin_url'],
                             internalurl=ep['internal_url'])
                services.append(ep['service'])
        service_username = '_'.join(services)

    # Nothing to credential when the remote end advertised no real endpoint.
    if 'None' in [v for k, v in settings.iteritems()]:
        return

    if not service_username:
        return

    token = get_admin_token()
    juju_log("Creating service credentials for '%s'" % service_username)

    # TODO: This needs to be changed as it won't work with ha keystone
    # Persist the generated service password so a re-fired hook hands the
    # same credentials back to the remote service.
    stored_passwd = '/var/lib/keystone/%s.passwd' % service_username
    if os.path.isfile(stored_passwd):
        juju_log("Loading stored service passwd from %s" % stored_passwd)
        service_password = open(stored_passwd, 'r').readline().strip('\n')
    else:
        juju_log("Generating a new service password for %s" % service_username)
        service_password = execute('pwgen -c 32 1', die=True)[0].strip()
        open(stored_passwd, 'w+').writelines("%s\n" % service_password)

    # As of https://review.openstack.org/#change,4675, all nodes hosting
    # an endpoint(s) needs a service username and password assigned to
    # the service tenant and granted admin role.
    # note: config['service-tenant'] is created in utils.ensure_initial_admin()
    create_user(service_username, service_password, config['service-tenant'])
    grant_role(service_username, config['admin-role'], config['service-tenant'])

    # we return a token, information about our API endpoints, and the generated
    # service credentials
    relation_data = {
        "admin_token": token,
        "service_host": config["hostname"],
        "service_port": config["service-port"],
        "auth_host": config["hostname"],
        "auth_port": config["admin-port"],
        "service_username": service_username,
        "service_password": service_password,
        "service_tenant": config['service-tenant']
    }
    # Check if clustered and use vip + haproxy ports if so
    if is_clustered():
        relation_data["auth_host"] = config['vip']
        relation_data["auth_port"] = SERVICE_PORTS['keystone_admin']
        relation_data["service_host"] = config['vip']
        relation_data["service_port"] = SERVICE_PORTS['keystone_service']

    relation_set(relation_data)
|
|
|
|
|
2012-08-08 16:17:52 -07:00
|
|
|
def config_changed():
    """React to a change in charm configuration.

    Upgrades OpenStack when a newer openstack-origin is offered,
    refreshes the admin token, endpoint data and logging level, enables
    PKI tokens on Grizzly+, restarts keystone and re-renders haproxy.
    """
    # Determine whether or not we should do an upgrade, based on the
    # version offered by the configured openstack-origin.
    offered = get_os_codename_install_source(config['openstack-origin'])
    running = get_os_codename_package('keystone')
    if offered and \
       get_os_version_codename(offered) > get_os_version_codename(running):
        do_openstack_upgrade(config['openstack-origin'], packages)

    set_admin_token(config['admin-token'])

    # Endpoint configuration is maintained by the leader when clustered,
    # or unconditionally when standalone.
    if not is_clustered():
        ensure_initial_admin(config)
    elif is_leader():
        juju_log('Cluster leader - ensuring endpoint configuration is up to date')
        ensure_initial_admin(config)

    update_config_block('logger_root', level=config['log-level'],
                        file='/etc/keystone/logging.conf')
    if get_os_version_package('keystone') >= '2013.1':
        # PKI introduced in Grizzly
        configure_pki_tokens(config)

    execute("service keystone restart", echo=True)
    cluster_changed()
|
|
|
|
|
|
|
|
|
|
|
|
def upgrade_charm():
    """Refresh haproxy config and endpoint data after a charm upgrade."""
    cluster_changed()
    if not is_clustered():
        ensure_initial_admin(config)
    elif is_leader():
        juju_log('Cluster leader - ensuring endpoint configuration is up to date')
        ensure_initial_admin(config)
|
2012-12-17 13:45:58 +00:00
|
|
|
|
|
|
|
|
|
|
|
# Ports haproxy listens on when clustered: one above the ports keystone
# itself binds, so haproxy can front the local keystone processes.
SERVICE_PORTS = {
    "keystone_admin": int(config['admin-port']) + 1,
    "keystone_service": int(config['service-port']) + 1
}
|
|
|
|
|
|
|
|
|
|
|
|
def cluster_changed():
    """Regenerate the haproxy configuration from current cluster members."""
    cluster_hosts = {'self': config['hostname']}
    for rid in relation_ids('cluster'):
        for member in relation_list(rid):
            address = relation_get_dict(
                relation_id=rid,
                remote_unit=member)['private-address']
            # unit names contain '/', which haproxy stanza names cannot
            cluster_hosts[member.replace('/', '-')] = address
    configure_haproxy(cluster_hosts, SERVICE_PORTS)
|
|
|
|
|
|
|
|
|
|
|
|
def ha_relation_changed():
    """Switch endpoints over to the VIP once hacluster reports clustered.

    Only the leader acts: it re-registers the keystone endpoint (via
    ensure_initial_admin) and notifies every identity-service relation
    to use the VIP and the haproxy ports.
    """
    relation_data = relation_get_dict()
    if ('clustered' in relation_data and
        is_leader()):
        # BUG FIX: the two adjacent string literals previously joined with
        # no separating space, logging "...updatingkeystone...".
        juju_log('Cluster configured, notifying other services and updating '
                 'keystone endpoint configuration')
        # Update keystone endpoint to point at VIP
        ensure_initial_admin(config)
        # Tell all related services to start using
        # the VIP and haproxy ports instead
        for r_id in relation_ids('identity-service'):
            relation_set_2(rid=r_id,
                           auth_host=config['vip'],
                           service_host=config['vip'],
                           service_port=SERVICE_PORTS['keystone_service'],
                           auth_port=SERVICE_PORTS['keystone_admin'])
|
|
|
|
|
|
|
|
|
2012-12-17 13:45:58 +00:00
|
|
|
def ha_relation_joined():
    """Hand the hacluster subordinate its cluster configuration.

    Publishes the corosync transport settings and the pacemaker
    resources (a VIP plus the haproxy LSB service) it should manage.
    """
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    corosync_bindiface = config['ha-bindiface']
    corosync_mcastport = config['ha-mcastport']

    # Resources the cluster manages: the VIP and haproxy itself.
    resources = {
        'res_ks_vip': 'ocf:heartbeat:IPaddr2',
        'res_ks_haproxy': 'lsb:haproxy',
    }
    # TODO: Obtain netmask and nic where to place VIP.
    resource_params = {
        'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % (
            config['vip'], config['vip_cidr'], config['vip_iface']),
        'res_ks_haproxy': 'op monitor interval="5s"',
    }
    init_services = {
        'res_ks_haproxy': 'haproxy',
    }
    groups = {
        'grp_ks_haproxy': 'res_ks_vip res_ks_haproxy',
    }
    # NOTE: clone/order/colocation constraints were sketched here in the
    # past but are intentionally not published to the relation.

    relation_set_2(init_services=init_services,
                   corosync_bindiface=corosync_bindiface,
                   corosync_mcastport=corosync_mcastport,
                   resources=resources,
                   resource_params=resource_params,
                   groups=groups)
|
|
|
|
|
|
|
|
|
2011-12-08 09:52:12 -08:00
|
|
|
# Dispatch table: maps the hook name this script was invoked as (via
# symlink) to its handler function.
hooks = {
    "install": install_hook,
    "shared-db-relation-joined": db_joined,
    "shared-db-relation-changed": db_changed,
    "identity-service-relation-joined": identity_joined,
    "identity-service-relation-changed": identity_changed,
    "config-changed": config_changed,
    "cluster-relation-changed": cluster_changed,
    "cluster-relation-departed": cluster_changed,
    "ha-relation-joined": ha_relation_joined,
    "ha-relation-changed": ha_relation_changed,
    "upgrade-charm": upgrade_charm
}

# keystone-hooks gets called by symlink corresponding to the requested relation
# hook.
arg0 = sys.argv[0].split("/").pop()
if arg0 not in hooks.keys():
    error_out("Unsupported hook: %s" % arg0)
hooks[arg0]()
|