#!/usr/bin/python
# Keystone charm hooks. Python 2 (urlparse, iteritems).
import os
import sys
import time
import urlparse

from base64 import b64encode

# Charm-local helpers (juju_log, relation_set, execute, config_get, ...)
# are star-imported; 'os' is imported explicitly above rather than relying
# on it leaking through the star imports.
from utils import *
from lib.openstack_common import *
import lib.unison as unison
config = config_get()
# packages installed by install_hook and upgraded by do_openstack_upgrade
packages = "keystone python-mysqldb pwgen haproxy python-jinja2 openssl unison"

service = "keystone"
# used to verify joined services are valid openstack components.
# this should reflect the current "core" components of openstack
# and be expanded as we add support for them as a distro
# Map of service names we accept on the identity-service relation to the
# catalog type and human-readable description registered for each.
valid_services = {
    "nova": {
        "type": "compute",
        "desc": "Nova Compute Service"
    },
    "nova-volume": {
        "type": "volume",
        "desc": "Nova Volume Service"
    },
    "cinder": {
        "type": "volume",
        "desc": "Cinder Volume Service"
    },
    "ec2": {
        "type": "ec2",
        "desc": "EC2 Compatibility Layer"
    },
    "glance": {
        "type": "image",
        "desc": "Glance Image Service"
    },
    "s3": {
        "type": "s3",
        "desc": "S3 Compatible object-store"
    },
    "swift": {
        "type": "object-store",
        "desc": "Swift Object Storage Service"
    },
    "quantum": {
        "type": "network",
        "desc": "Quantum Networking Service"
    },
    "oxygen": {
        "type": "oxygen",
        "desc": "Oxygen Cloud Image Service"
    },
    "ceilometer": {
        "type": "metering",
        "desc": "Ceilometer Metering Service"
    }
}
def install_hook():
    """Install keystone and bootstrap it against a local sqlite backend.

    Configures the requested installation source, installs packages,
    points all keystone backends at sqlite, runs the initial db_sync and
    seeds the initial admin credentials.
    """
    if config["openstack-origin"] != "distro":
        configure_installation_source(config["openstack-origin"])
    execute("apt-get update", die=True)
    execute("apt-get -y install %s" % packages, die=True, echo=True)
    update_config_block('DEFAULT', public_port=config["service-port"])
    update_config_block('DEFAULT', admin_port=config["admin-port"])
    set_admin_token(config['admin-token'])
    # set all backends to use sql+sqlite, if they are not already by default
    update_config_block('sql',
                        connection='sqlite:////var/lib/keystone/keystone.db')
    update_config_block('identity',
                        driver='keystone.identity.backends.sql.Identity')
    update_config_block('catalog',
                        driver='keystone.catalog.backends.sql.Catalog')
    update_config_block('token',
                        driver='keystone.token.backends.sql.Token')
    update_config_block('ec2',
                        driver='keystone.contrib.ec2.backends.sql.Ec2')
    execute("service keystone stop", echo=True)
    execute("keystone-manage db_sync")
    execute("service keystone start", echo=True)
    # ensure /var/lib/keystone is g+wrx for peer relations that
    # may be syncing data there via SSH_USER.
    execute("chmod -R g+wrx /var/lib/keystone/")
    # give keystone a moment to come up before creating the admin entities
    time.sleep(5)
    ensure_initial_admin(config)
def db_joined():
    """Advertise our database name, user and host on the shared-db relation."""
    relation_data = {"database": config["database"],
                     "username": config["database-user"],
                     "hostname": config["hostname"]}
    relation_set(relation_data)
def db_changed():
    """Point keystone at the shared mysql database once the peer is ready.

    Exits 0 (not an error) when password/db_host are not yet set on the
    relation. Only the elected leader runs db_sync; other units just
    restart keystone with the new connection string.
    """
    relation_data = relation_get_dict()
    if ('password' not in relation_data or
        'db_host' not in relation_data):
        juju_log("db_host or password not set. Peer not ready, exit 0")
        exit(0)
    update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
                        (config["database-user"],
                         relation_data["password"],
                         relation_data["db_host"],
                         config["database"]))
    execute("service keystone stop", echo=True)
    if not eligible_leader():
        juju_log('Deferring DB initialization to service leader.')
        execute("service keystone start")
        return
    execute("keystone-manage db_sync", echo=True)
    execute("service keystone start")
    time.sleep(5)
    ensure_initial_admin(config)
    # If the backend database has been switched to something new and there
    # are existing identity-service relations, service entries need to be
    # recreated in the new database. Re-executing identity-service-changed
    # will do this.
    for r_id in relation_ids(relation_name='identity-service'):
        for unit in relation_list(relation_id=r_id):
            juju_log("Re-exec'ing identity-service-changed for: %s - %s" %
                     (r_id, unit))
            identity_changed(relation_id=r_id, remote_unit=unit)
def ensure_valid_service(service):
    """Warn and signal rejection when a requested service is unknown.

    An admin_token of -1 is advertised on the relation so the remote end
    can detect that its request was refused.
    """
    if service not in valid_services.keys():
        juju_log("WARN: Invalid service requested: '%s'" % service)
        # BUG FIX: was 'realtion_set', which raised NameError at runtime.
        relation_set({"admin_token": -1})
        return
def add_endpoint(region, service, public_url, admin_url, internal_url):
    """Register a service and its endpoint template in the keystone catalog.

    NOTE(review): a near-duplicate of this helper (with differently named
    keyword args) is also nested inside identity_changed(); consider
    consolidating.
    """
    desc = valid_services[service]["desc"]
    service_type = valid_services[service]["type"]
    create_service_entry(service, service_type, desc)
    create_endpoint_template(region=region, service=service,
                             public_url=public_url,
                             admin_url=admin_url,
                             internal_url=internal_url)
def identity_joined():
    """ Do nothing until we get information about requested service """
    pass
def identity_changed(relation_id=None, remote_unit=None):
    """ A service has advertised its API endpoints, create an entry in the
    service catalog.
    Optionally allow this hook to be re-fired for an existing
    relation+unit, for context see db_changed().
    """
    def ensure_valid_service(service):
        # Reject unknown services by advertising an invalid admin token.
        if service not in valid_services.keys():
            juju_log("WARN: Invalid service requested: '%s'" % service)
            # BUG FIX: was 'realtion_set', a NameError at runtime.
            relation_set({"admin_token": -1})
            return

    def add_endpoint(region, service, publicurl, adminurl, internalurl):
        # Create the service entry and its endpoint template in the catalog.
        desc = valid_services[service]["desc"]
        service_type = valid_services[service]["type"]
        create_service_entry(service, service_type, desc)
        create_endpoint_template(region=region, service=service,
                                 publicurl=publicurl,
                                 adminurl=adminurl,
                                 internalurl=internalurl)

    if not eligible_leader():
        juju_log('Deferring identity_changed() to service leader.')
        return

    settings = relation_get_dict(relation_id=relation_id,
                                 remote_unit=remote_unit)

    # Allow the remote service to request creation of any additional roles.
    # Currently used by Swift.
    if 'requested_roles' in settings and settings['requested_roles'] != 'None':
        roles = settings['requested_roles'].split(',')
        juju_log("Creating requested roles: %s" % roles)
        for role in roles:
            create_role(role, user=config['admin-user'], tenant='admin')

    # the minimum settings needed per endpoint
    single = set(['service', 'region', 'public_url', 'admin_url',
                  'internal_url'])
    if single.issubset(settings):
        # other end of relation advertised only one endpoint
        if 'None' in [v for k, v in settings.iteritems()]:
            # Some backend services advertise no endpoint but require a
            # hook execution to update auth strategy.
            relation_data = {}
            # Check if clustered and use vip + haproxy ports if so
            if is_clustered():
                relation_data["auth_host"] = config['vip']
                relation_data["auth_port"] = SERVICE_PORTS['keystone_admin']
                relation_data["service_host"] = config['vip']
                relation_data["service_port"] = SERVICE_PORTS['keystone_service']
            else:
                relation_data["auth_host"] = config['hostname']
                relation_data["auth_port"] = config['admin-port']
                relation_data["service_host"] = config['hostname']
                relation_data["service_port"] = config['service-port']
            relation_set(relation_data)
            return
        ensure_valid_service(settings['service'])
        add_endpoint(region=settings['region'], service=settings['service'],
                     publicurl=settings['public_url'],
                     adminurl=settings['admin_url'],
                     internalurl=settings['internal_url'])
        service_username = settings['service']
        # HTTPS certs are issued for the internal endpoint's hostname.
        https_cn = urlparse.urlparse(settings['internal_url'])
        https_cn = https_cn.hostname
    else:
        # assemble multiple endpoints from relation data. service name
        # should be prepended to setting name, ie:
        #  relation-set ec2_service=$foo ec2_region=$foo ec2_public_url=$foo
        #  relation-set nova_service=$foo nova_region=$foo nova_public_url=$foo
        # Results in a dict that looks like:
        # { 'ec2': {
        #       'service': $foo
        #       'region': $foo
        #       'public_url': $foo
        #   }
        #   'nova': {
        #       'service': $foo
        #       'region': $foo
        #       'public_url': $foo
        #   }
        # }
        endpoints = {}
        for k, v in settings.iteritems():
            ep = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if ep not in endpoints:
                endpoints[ep] = {}
            endpoints[ep][x] = v
        services = []
        https_cn = None
        for ep in endpoints:
            # weed out any unrelated relation stuff Juju might have added
            # by ensuring each possible endpoint has appropriate fields
            #  ['service', 'region', 'public_url', 'admin_url', 'internal_url']
            if single.issubset(endpoints[ep]):
                ep = endpoints[ep]
                ensure_valid_service(ep['service'])
                add_endpoint(region=ep['region'], service=ep['service'],
                             publicurl=ep['public_url'],
                             adminurl=ep['admin_url'],
                             internalurl=ep['internal_url'])
                services.append(ep['service'])
                if not https_cn:
                    https_cn = urlparse.urlparse(ep['internal_url'])
                    https_cn = https_cn.hostname
        service_username = '_'.join(services)

    if 'None' in [v for k, v in settings.iteritems()]:
        return

    if not service_username:
        return

    token = get_admin_token()
    juju_log("Creating service credentials for '%s'" % service_username)
    service_password = get_service_password(service_username)
    create_user(service_username, service_password, config['service-tenant'])
    grant_role(service_username, config['admin-role'], config['service-tenant'])

    # As of https://review.openstack.org/#change,4675, all nodes hosting
    # an endpoint(s) needs a service username and password assigned to
    # the service tenant and granted admin role.
    # note: config['service-tenant'] is created in utils.ensure_initial_admin()
    # we return a token, information about our API endpoints, and the generated
    # service credentials
    relation_data = {
        "admin_token": token,
        "service_host": config["hostname"],
        "service_port": config["service-port"],
        "auth_host": config["hostname"],
        "auth_port": config["admin-port"],
        "service_username": service_username,
        "service_password": service_password,
        "service_tenant": config['service-tenant'],
        "https_keystone": "False",
        "ssl_cert": "",
        "ssl_key": "",
        "ca_cert": ""
    }
    if relation_id:
        relation_data['rid'] = relation_id

    # Check if clustered and use vip + haproxy ports if so
    if is_clustered():
        relation_data["auth_host"] = config['vip']
        relation_data["auth_port"] = SERVICE_PORTS['keystone_admin']
        relation_data["service_host"] = config['vip']
        relation_data["service_port"] = SERVICE_PORTS['keystone_service']

    # generate or get a new cert/key for service if set to manage certs.
    if config['https-service-endpoints'] in ['True', 'true']:
        ca = get_ca(user=SSH_USER)
        # NOTE(review): 'service' is assigned but never used below;
        # candidate for removal — confirm nothing relies on the getenv call.
        service = os.getenv('JUJU_REMOTE_UNIT').split('/')[0]
        cert, key = ca.get_cert_and_key(common_name=https_cn)
        ca_bundle = ca.get_ca_bundle()
        relation_data['ssl_cert'] = b64encode(cert)
        relation_data['ssl_key'] = b64encode(key)
        relation_data['ca_cert'] = b64encode(ca_bundle)
        relation_data['https_keystone'] = 'True'
        unison.sync_to_peers(peer_interface='cluster',
                             paths=[SSL_DIR], user=SSH_USER, verbose=True)
    relation_set_2(**relation_data)
    synchronize_service_credentials()
def config_changed():
    """Apply charm config: upgrade OpenStack if a newer release is offered,
    refresh ports/token/logging config and restart keystone."""
    # Determine whether or not we should do an upgrade, based on the
    # version offered in openstack-origin.
    available = get_os_codename_install_source(config['openstack-origin'])
    installed = get_os_codename_package('keystone')
    if (available and
        get_os_version_codename(available) > get_os_version_codename(installed)):
        do_openstack_upgrade(config['openstack-origin'], packages)
    env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
                'OPENSTACK_PORT_ADMIN': config['admin-port'],
                'OPENSTACK_PORT_PUBLIC': config['service-port']}
    save_script_rc(**env_vars)
    set_admin_token(config['admin-token'])
    if eligible_leader():
        juju_log('Cluster leader - ensuring endpoint configuration is up to date')
        ensure_initial_admin(config)
    update_config_block('logger_root', level=config['log-level'],
                        file='/etc/keystone/logging.conf')
    # NOTE(review): lexicographic string comparison of versions; fine for
    # '2013.1' but would misorder a hypothetical '.10' point release.
    if get_os_version_package('keystone') >= '2013.1':
        # PKI introduced in Grizzly
        configure_pki_tokens(config)
    execute("service keystone restart", echo=True)
    cluster_changed()
def upgrade_charm():
    """Re-run cluster configuration and refresh endpoints after an upgrade."""
    cluster_changed()
    if eligible_leader():
        juju_log('Cluster leader - ensuring endpoint configuration is up to date')
        ensure_initial_admin(config)
# Ports advertised to clients when clustered — one above the configured
# keystone ports; presumably these are the haproxy listen ports (confirm
# against the haproxy configuration template).
SERVICE_PORTS = {
    "keystone_admin": int(config['admin-port']) + 1,
    "keystone_service": int(config['service-port']) + 1
}
def cluster_joined():
    """Ensure the sync user exists and peers' SSH keys are authorized."""
    unison.ssh_authorized_peers(user=SSH_USER,
                                group='keystone',
                                peer_interface='cluster',
                                ensure_user=True)
def cluster_changed():
    """Reconfigure the peer cluster: SSH keys, haproxy and HTTPS endpoints."""
    unison.ssh_authorized_peers(user=SSH_USER,
                                group='keystone',
                                peer_interface='cluster',
                                ensure_user=True)
    cluster_hosts = {}
    cluster_hosts['self'] = config['hostname']
    for r_id in relation_ids('cluster'):
        for unit in relation_list(r_id):
            # trigger identity-changed to reconfigure HTTPS
            # as necessary.
            # NOTE(review): this fires identity_changed() with a *cluster*
            # relation id/unit — confirm that is intended and not a
            # duplicate of the identity-service loop below.
            identity_changed(relation_id=r_id, remote_unit=unit)
            cluster_hosts[unit.replace('/', '-')] = \
                relation_get_dict(relation_id=r_id,
                                  remote_unit=unit)['private-address']
    configure_haproxy(cluster_hosts,
                      SERVICE_PORTS)
    synchronize_service_credentials()
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # trigger identity-changed to reconfigure HTTPS as necessary
            identity_changed(relation_id=r_id, remote_unit=unit)
def ha_relation_changed():
    """Once hacluster reports the service clustered, repoint all
    identity-service relations at the VIP and haproxy ports."""
    relation_data = relation_get_dict()
    if ('clustered' in relation_data and
        is_leader()):
        # BUG FIX: implicit string concatenation was missing a space
        # ("updatingkeystone") — added the trailing space.
        juju_log('Cluster configured, notifying other services and updating '
                 'keystone endpoint configuration')
        # Update keystone endpoint to point at VIP
        ensure_initial_admin(config)
        # Tell all related services to start using
        # the VIP and haproxy ports instead
        for r_id in relation_ids('identity-service'):
            relation_set_2(rid=r_id,
                           auth_host=config['vip'],
                           service_host=config['vip'],
                           service_port=SERVICE_PORTS['keystone_service'],
                           auth_port=SERVICE_PORTS['keystone_admin'])
def ha_relation_joined():
    """Advertise corosync/pacemaker resource definitions to hacluster."""
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    corosync_bindiface = config['ha-bindiface']
    corosync_mcastport = config['ha-mcastport']
    # Obtain resources
    resources = {
        'res_ks_vip': 'ocf:heartbeat:IPaddr2',
        'res_ks_haproxy': 'lsb:haproxy'
    }
    # TODO: Obtain netmask and nic where to place VIP.
    resource_params = {
        'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                      (config['vip'], config['vip_cidr'], config['vip_iface']),
        'res_ks_haproxy': 'op monitor interval="5s"'
    }
    init_services = {
        'res_ks_haproxy': 'haproxy'
    }
    groups = {
        'grp_ks_haproxy': 'res_ks_vip res_ks_haproxy'
    }
    # Kept for reference: alternative clone/order/colocation constraints.
    #clones = {
    #    'cln_ks_haproxy': 'res_ks_haproxy meta globally-unique="false" interleave="true"'
    #    }
    #orders = {
    #    'ord_vip_before_haproxy': 'inf: res_ks_vip res_ks_haproxy'
    #    }
    #colocations = {
    #    'col_vip_on_haproxy': 'inf: res_ks_haproxy res_ks_vip'
    #    }
    relation_set_2(init_services=init_services,
                   corosync_bindiface=corosync_bindiface,
                   corosync_mcastport=corosync_mcastport,
                   resources=resources,
                   resource_params=resource_params,
                   groups=groups)
# Dispatch table mapping hook names to their implementations.
hooks = {
    "install": install_hook,
    "shared-db-relation-joined": db_joined,
    "shared-db-relation-changed": db_changed,
    "identity-service-relation-joined": identity_joined,
    "identity-service-relation-changed": identity_changed,
    "config-changed": config_changed,
    "cluster-relation-joined": cluster_joined,
    "cluster-relation-changed": cluster_changed,
    "cluster-relation-departed": cluster_changed,
    "ha-relation-joined": ha_relation_joined,
    "ha-relation-changed": ha_relation_changed,
    "upgrade-charm": upgrade_charm
}

# keystone-hooks gets called by symlink corresponding to the requested relation
# hook.
hook = os.path.basename(sys.argv[0])
if hook not in hooks.keys():
    error_out("Unsupported hook: %s" % hook)
hooks[hook]()