Refactoring to use openstack charm helpers
parent b22142ee63
commit 7ce1bb2dd7
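The diffs below replace the charm's hand-rolled shell wrappers (execute, juju_log, relation_set/relation_set_2, and the crm-based leader checks) with the shared charm-helper modules imported as lib.utils, lib.cluster_utils, lib.haproxy_utils and lib.unison. As a reading aid, here is a small Python summary of the old-to-new call mapping, built only from calls that actually appear in this commit; the helper behaviour is assumed from how the hooks use them, not from the library source:

# Old helper call -> charm-helper replacement, as seen in the hunks below.
OLD_TO_NEW = {
    'execute("service keystone stop", echo=True)': "utils.stop('keystone')",
    'execute("service keystone start")': "utils.start('keystone')",
    'execute("service keystone restart", echo=True)': "utils.restart('keystone')",
    'execute("apt-get -y install %s" % packages, ...)': "utils.install(packages)",
    'juju_log(msg)': "utils.juju_log('INFO', msg)",
    'relation_set(relation_data) / relation_set_2(**kw)': "utils.relation_set(**kw)",
    'relation_ids(relation_name=...) / relation_list(...)':
        "utils.relation_ids(...) / utils.relation_list(rid=...)",
    'is_clustered() / is_leader() / eligible_leader()':
        "cluster.is_clustered() / cluster.is_leader(CLUSTER_RES) / "
        "cluster.eligible_leader(CLUSTER_RES)",
    'configure_haproxy(cluster_hosts, SERVICE_PORTS)':
        "haproxy.configure_haproxy(service_ports)",
}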
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1,19 +1,53 @@
#!/usr/bin/python

import sys
import os
import time
import urlparse

from base64 import b64encode

from utils import *
from keystone_utils import (
    config_get,
    execute,
    update_config_block,
    set_admin_token,
    ensure_initial_admin,
    relation_get_dict,
    create_service_entry,
    create_endpoint_template,
    create_role,
    get_admin_token,
    get_service_password,
    create_user,
    grant_role,
    get_ca,
    synchronize_service_credentials,
    do_openstack_upgrade,
    configure_pki_tokens,
    SSH_USER,
    SSL_DIR,
    CLUSTER_RES
    )

from lib.openstack_common import *
from lib.openstack_common import (
    configure_installation_source,
    get_os_codename_install_source,
    get_os_codename_package,
    get_os_version_codename,
    get_os_version_package,
    save_script_rc
    )
import lib.unison as unison
import lib.utils as utils
import lib.cluster_utils as cluster
import lib.haproxy_utils as haproxy

config = config_get()

packages = "keystone python-mysqldb pwgen haproxy python-jinja2 openssl unison"
packages = [
    "keystone", "python-mysqldb", "pwgen",
    "haproxy", "python-jinja2", "openssl", "unison"
    ]
service = "keystone"

# used to verify joined services are valid openstack components.
@@ -62,13 +96,15 @@ valid_services = {
    }
}


def install_hook():
    if config["openstack-origin"] != "distro":
        configure_installation_source(config["openstack-origin"])
    execute("apt-get update", die=True)
    execute("apt-get -y install %s" % packages, die=True, echo=True)
    update_config_block('DEFAULT', public_port=config["service-port"])
    update_config_block('DEFAULT', admin_port=config["admin-port"])
    utils.install(packages)
    update_config_block('DEFAULT',
                public_port=cluster.determine_api_port(config["service-port"]))
    update_config_block('DEFAULT',
                admin_port=cluster.determine_api_port(config["admin-port"]))
    set_admin_token(config['admin-token'])

    # set all backends to use sql+sqlite, if they are not already by default
@@ -83,9 +119,9 @@ def install_hook():
    update_config_block('ec2',
                driver='keystone.contrib.ec2.backends.sql.Ec2')

    execute("service keystone stop", echo=True)
    utils.stop('keystone')
    execute("keystone-manage db_sync")
    execute("service keystone start", echo=True)
    utils.start('keystone')

    # ensure /var/lib/keystone is g+wrx for peer relations that
    # may be syncing data there via SSH_USER.
@@ -96,17 +132,21 @@ def install_hook():


def db_joined():
    relation_data = { "database": config["database"],
    relation_data = {
        "database": config["database"],
        "username": config["database-user"],
        "hostname": config["hostname"] }
    relation_set(relation_data)
        "hostname": config["hostname"]
        }
    utils.relation_set(**relation_data)


def db_changed():
    relation_data = relation_get_dict()
    if ('password' not in relation_data or
        'db_host' not in relation_data):
        juju_log("db_host or password not set. Peer not ready, exit 0")
        exit(0)
        utils.juju_log('INFO',
            "db_host or password not set. Peer not ready, exit 0")
        return

    update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
                (config["database-user"],
@@ -114,15 +154,13 @@ def db_changed():
                 relation_data["db_host"],
                 config["database"]))

    execute("service keystone stop", echo=True)

    if not eligible_leader():
        juju_log('Deferring DB initialization to service leader.')
        execute("service keystone start")
        return

    utils.stop('keystone')
    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
            'Cluster leader, performing db-sync')
        execute("keystone-manage db_sync", echo=True)
    execute("service keystone start")
    utils.start('keystone')

    time.sleep(5)
    ensure_initial_admin(config)

@@ -130,42 +168,21 @@ def db_changed():
    # are existing identity-service relations,, service entries need to be
    # recreated in the new database. Re-executing identity-service-changed
    # will do this.
    for id in relation_ids(relation_name='identity-service'):
        for unit in relation_list(relation_id=id):
            juju_log("Re-exec'ing identity-service-changed for: %s - %s" %
                (id, unit))
            identity_changed(relation_id=id, remote_unit=unit)
    for rid in utils.relation_ids('identity-service'):
        for unit in utils.relation_list(rid=rid):
            utils.juju_log('INFO',
                "Re-exec'ing identity-service-changed"
                " for: %s - %s" % (rid, unit))
            identity_changed(relation_id=rid, remote_unit=unit)


def ensure_valid_service(service):
    if service not in valid_services.keys():
        juju_log("WARN: Invalid service requested: '%s'" % service)
        realtion_set({ "admin_token": -1 })
        utils.juju_log('WARNING',
            "Invalid service requested: '%s'" % service)
        utils.relation_set(admin_token=-1)
        return

def add_endpoint(region, service, public_url, admin_url, internal_url):
    desc = valid_services[service]["desc"]
    service_type = valid_services[service]["type"]
    create_service_entry(service, service_type, desc)
    create_endpoint_template(region=region, service=service,
                             public_url=public_url,
                             admin_url=admin_url,
                             internal_url=internal_url)

def identity_joined():
    """ Do nothing until we get information about requested service """
    pass

def identity_changed(relation_id=None, remote_unit=None):
    """ A service has advertised its API endpoints, create an entry in the
    service catalog.
    Optionally allow this hook to be re-fired for an existing
    relation+unit, for context see see db_changed().
    """
    def ensure_valid_service(service):
        if service not in valid_services.keys():
            juju_log("WARN: Invalid service requested: '%s'" % service)
            realtion_set({ "admin_token": -1 })
            return

    def add_endpoint(region, service, publicurl, adminurl, internalurl):
        desc = valid_services[service]["desc"]
@@ -176,8 +193,21 @@ def identity_changed(relation_id=None, remote_unit=None):
                                 adminurl=adminurl,
                                 internalurl=internalurl)

    if not eligible_leader():
        juju_log('Deferring identity_changed() to service leader.')

def identity_joined():
    """ Do nothing until we get information about requested service """
    pass


def identity_changed(relation_id=None, remote_unit=None):
    """ A service has advertised its API endpoints, create an entry in the
    service catalog.
    Optionally allow this hook to be re-fired for an existing
    relation+unit, for context see see db_changed().
    """
    if not cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
            'Deferring identity_changed() to service leader.')
        return

    settings = relation_get_dict(relation_id=relation_id,
@@ -187,7 +217,8 @@ def identity_changed(relation_id=None, remote_unit=None):
    # Currently used by Swift.
    if 'requested_roles' in settings and settings['requested_roles'] != 'None':
        roles = settings['requested_roles'].split(',')
        juju_log("Creating requested roles: %s" % roles)
        utils.juju_log('INFO',
            "Creating requested roles: %s" % roles)
        for role in roles:
            create_role(role, user=config['admin-user'], tenant='admin')

@@ -201,20 +232,17 @@ def identity_changed(relation_id=None, remote_unit=None):
        # hook execution to update auth strategy.
        relation_data = {}
        # Check if clustered and use vip + haproxy ports if so
        if is_clustered():
        if cluster.is_clustered():
            relation_data["auth_host"] = config['vip']
            relation_data["auth_port"] = SERVICE_PORTS['keystone_admin']
            relation_data["service_host"] = config['vip']
            relation_data["service_port"] = SERVICE_PORTS['keystone_service']
        else:
            relation_data["auth_host"] = config['hostname']
            relation_data["auth_port"] = config['admin-port']
            relation_data["service_host"] = config['hostname']
            relation_data["auth_port"] = config['admin-port']
            relation_data["service_port"] = config['service-port']
        relation_set(relation_data)
        utils.relation_set(**relation_data)
        return


    ensure_valid_service(settings['service'])

    add_endpoint(region=settings['region'], service=settings['service'],
@@ -274,11 +302,13 @@ def identity_changed(relation_id=None, remote_unit=None):
        return

    token = get_admin_token()
    juju_log("Creating service credentials for '%s'" % service_username)
    utils.juju_log('INFO',
        "Creating service credentials for '%s'" % service_username)

    service_password = get_service_password(service_username)
    create_user(service_username, service_password, config['service-tenant'])
    grant_role(service_username, config['admin-role'], config['service-tenant'])
    grant_role(service_username, config['admin-role'],
               config['service-tenant'])

    # As of https://review.openstack.org/#change,4675, all nodes hosting
    # an endpoint(s) needs a service username and password assigned to
@@ -305,16 +335,13 @@ def identity_changed(relation_id=None, remote_unit=None):
        relation_data['rid'] = relation_id

    # Check if clustered and use vip + haproxy ports if so
    if is_clustered():
    if cluster.is_clustered():
        relation_data["auth_host"] = config['vip']
        relation_data["auth_port"] = SERVICE_PORTS['keystone_admin']
        relation_data["service_host"] = config['vip']
        relation_data["service_port"] = SERVICE_PORTS['keystone_service']

    # generate or get a new cert/key for service if set to manage certs.
    if config['https-service-endpoints'] in ['True', 'true']:
        ca = get_ca(user=SSH_USER)
        service = os.getenv('JUJU_REMOTE_UNIT').split('/')[0]
        cert, key = ca.get_cert_and_key(common_name=https_cn)
        ca_bundle = ca.get_ca_bundle()
        relation_data['ssl_cert'] = b64encode(cert)
@@ -323,9 +350,10 @@ def identity_changed(relation_id=None, remote_unit=None):
        relation_data['https_keystone'] = 'True'
        unison.sync_to_peers(peer_interface='cluster',
                             paths=[SSL_DIR], user=SSH_USER, verbose=True)
    relation_set_2(**relation_data)
    utils.relation_set(**relation_data)
    synchronize_service_credentials()


def config_changed():

    # Determine whether or not we should do an upgrade, based on the
@@ -334,7 +362,8 @@ def config_changed():
    installed = get_os_codename_package('keystone')

    if (available and
        get_os_version_codename(available) > get_os_version_codename(installed)):
        get_os_version_codename(available) > \
            get_os_version_codename(installed)):
        do_openstack_upgrade(config['openstack-origin'], packages)

    env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
@@ -344,8 +373,10 @@ def config_changed():

    set_admin_token(config['admin-token'])

    if eligible_leader():
        juju_log('Cluster leader - ensuring endpoint configuration is up to date')
    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
            'Cluster leader - ensuring endpoint configuration'
            ' is up to date')
        ensure_initial_admin(config)

    update_config_block('logger_root', level=config['log-level'],
@@ -354,69 +385,68 @@ def config_changed():
    # PKI introduced in Grizzly
    configure_pki_tokens(config)

    execute("service keystone restart", echo=True)
    utils.restart('keystone')
    cluster_changed()


def upgrade_charm():
    cluster_changed()
    if eligible_leader():
        juju_log('Cluster leader - ensuring endpoint configuration is up to date')
    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
            'Cluster leader - ensuring endpoint configuration'
            ' is up to date')
        ensure_initial_admin(config)


SERVICE_PORTS = {
    "keystone_admin": int(config['admin-port']) + 1,
    "keystone_service": int(config['service-port']) + 1
}

def cluster_joined():
    unison.ssh_authorized_peers(user=SSH_USER,
                                group='keystone',
                                peer_interface='cluster',
                                ensure_user=True)
    update_config_block('DEFAULT',
                public_port=cluster.determine_api_port(config["service-port"]))
    update_config_block('DEFAULT',
                admin_port=cluster.determine_api_port(config["admin-port"]))
    utils.restart('keystone')
    service_ports = {
        "keystone_admin": \
            cluster.determine_haproxy_port(config['admin-port']),
        "keystone_service": \
            cluster.determine_haproxy_port(config['service-port'])
        }
    haproxy.configure_haproxy(service_ports)


def cluster_changed():
    unison.ssh_authorized_peers(user=SSH_USER,
                                group='keystone',
                                peer_interface='cluster',
                                ensure_user=True)
    cluster_hosts = {}
    cluster_hosts['self'] = config['hostname']
    for r_id in relation_ids('cluster'):
        for unit in relation_list(r_id):
            # trigger identity-changed to reconfigure HTTPS
            # as necessary.
            identity_changed(relation_id=r_id, remote_unit=unit)
            cluster_hosts[unit.replace('/','-')] = \
                relation_get_dict(relation_id=r_id,
                                  remote_unit=unit)['private-address']
    configure_haproxy(cluster_hosts,
                      SERVICE_PORTS)

    synchronize_service_credentials()
    service_ports = {
        "keystone_admin": \
            cluster.determine_haproxy_port(config['admin-port']),
        "keystone_service": \
            cluster.determine_haproxy_port(config['service-port'])
        }
    haproxy.configure_haproxy(service_ports)

    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # trigger identity-changed to reconfigure HTTPS as necessary
            identity_changed(relation_id=r_id, remote_unit=unit)

def ha_relation_changed():
    relation_data = relation_get_dict()
    if ('clustered' in relation_data and
        is_leader()):
        juju_log('Cluster configured, notifying other services and updating'
            'keystone endpoint configuration')
        cluster.is_leader(CLUSTER_RES)):
        utils.juju_log('INFO',
            'Cluster configured, notifying other services'
            ' and updating keystone endpoint configuration')
        # Update keystone endpoint to point at VIP
        ensure_initial_admin(config)
        # Tell all related services to start using
        # the VIP and haproxy ports instead
        for r_id in relation_ids('identity-service'):
            relation_set_2(rid=r_id,
        for r_id in utils.relation_ids('identity-service'):
            utils.relation_set(rid=r_id,
                auth_host=config['vip'],
                service_host=config['vip'],
                service_port=SERVICE_PORTS['keystone_service'],
                auth_port=SERVICE_PORTS['keystone_admin'])
                service_host=config['vip'])


def ha_relation_joined():
@@ -424,41 +454,33 @@ def ha_relation_joined():
    # include multicast port and interface to bind to.
    corosync_bindiface = config['ha-bindiface']
    corosync_mcastport = config['ha-mcastport']
    vip = config['vip']
    vip_cidr = config['vip_cidr']
    vip_iface = config['vip_iface']

    # Obtain resources
    resources = {
        'res_ks_vip': 'ocf:heartbeat:IPaddr2',
        'res_ks_haproxy': 'lsb:haproxy'
        }
    # TODO: Obtain netmask and nic where to place VIP.
    resource_params = {
        'res_ks_vip':'params ip="%s" cidr_netmask="%s" nic="%s"' % (config['vip'],
            config['vip_cidr'], config['vip_iface']),
        'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
            (vip, vip_cidr, vip_iface),
        'res_ks_haproxy': 'op monitor interval="5s"'
        }
    init_services = {
        'res_ks_haproxy': 'haproxy'
        }
    groups = {
        'grp_ks_haproxy':'res_ks_vip res_ks_haproxy'
    clones = {
        'gl_ks_haproxy': 'res_ks_haproxy'
        }
    #clones = {
    #    'cln_ks_haproxy':'res_ks_haproxy meta globally-unique="false" interleave="true"'
    #    }

    #orders = {
    #    'ord_vip_before_haproxy':'inf: res_ks_vip res_ks_haproxy'
    #    }
    #colocations = {
    #    'col_vip_on_haproxy':'inf: res_ks_haproxy res_ks_vip'
    #    }

    relation_set_2(init_services=init_services,
    utils.relation_set(init_services=init_services,
                       corosync_bindiface=corosync_bindiface,
                       corosync_mcastport=corosync_mcastport,
                       resources=resources,
                       resource_params=resource_params,
                       groups=groups)
                       clones=clones)


hooks = {
@@ -476,9 +498,4 @@ hooks = {
    "upgrade-charm": upgrade_charm
}

# keystone-hooks gets called by symlink corresponding to the requested relation
# hook.
hook = os.path.basename(sys.argv[0])
if hook not in hooks.keys():
    error_out("Unsupported hook: %s" % hook)
hooks[hook]()
utils.do_hooks(hooks)
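Note on the last hunk: utils.do_hooks(hooks) takes over the dispatch the old code did inline at the bottom of the file. A rough, self-contained sketch of that dispatch, assuming do_hooks mirrors the removed lines above (the error handling is an assumption; the old code called error_out()):

import os
import sys

def do_hooks(hooks):
    # Each hook is a symlink named after the relation hook, all pointing at
    # this script, so the requested hook is the basename we were invoked as.
    hook = os.path.basename(sys.argv[0])
    try:
        hook_func = hooks[hook]
    except KeyError:
        sys.exit("Unsupported hook: %s" % hook)
    hook_func()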
@@ -1,17 +1,23 @@
#!/usr/bin/python
import ConfigParser
import subprocess
import sys
import json
import os
import tarfile
import tempfile
import time
import subprocess
import os

from lib.openstack_common import *
from lib.openstack_common import(
    get_os_codename_install_source,
    get_os_codename_package,
    error_out,
    configure_installation_source
    )

import keystone_ssl as ssl
import lib.unison as unison
import lib.utils as utils
import lib.cluster_utils as cluster


keystone_conf = "/etc/keystone/keystone.conf"
stored_passwd = "/var/lib/keystone/keystone.passwd"
@@ -20,9 +26,10 @@ SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd'

SSL_DIR = '/var/lib/keystone/juju_ssl/'
SSL_CA_NAME = 'Ubuntu Cloud'

CLUSTER_RES = 'res_ks_vip'
SSH_USER = 'juju_keystone'


def execute(cmd, die=False, echo=False):
    """ Executes a command

@@ -71,68 +78,9 @@ def config_get():
        if not config[c]:
            error_out("ERROR: Config option has no paramter: %s" % c)
    # tack on our private address and ip
    hostname = execute("unit-get private-address")[0].strip()
    config["hostname"] = execute("unit-get private-address")[0].strip()
    config["hostname"] = utils.unit_get('private-address')
    return config

def relation_ids(relation_name=None):
    j = execute('relation-ids --format=json %s' % relation_name)[0]
    return json.loads(j)

def relation_list(relation_id=None):
    cmd = 'relation-list --format=json'
    if relation_id:
        cmd += ' -r %s' % relation_id
    j = execute(cmd)[0]
    return json.loads(j)

def relation_set(relation_data):
    """ calls relation-set for all key=values in dict """
    for k in relation_data:
        execute("relation-set %s=%s" % (k, relation_data[k]), die=True)

def relation_set_2(**kwargs):
    cmd = [
        'relation-set'
        ]
    args = []
    for k, v in kwargs.items():
        if k == 'rid':
            cmd.append('-r')
            cmd.append(v)
        else:
            args.append('{}={}'.format(k, v))
    cmd += args
    subprocess.check_call(cmd)


def unit_get(attribute):
    cmd = [
        'unit-get',
        attribute
        ]
    value = subprocess.check_output(cmd).strip()  # IGNORE:E1103
    if value == "":
        return None
    else:
        return value


def relation_get(relation_data):
    """ Obtain all current relation data
    relation_data is a list of options to query from the relation
    Returns a k,v dict of the results.
    Leave empty responses out of the results as they haven't yet been
    set on the other end.
    Caller can then "len(results.keys()) == len(relation_data)" to find out if
    all relation values have been set on the other side
    """
    results = {}
    for r in relation_data:
        result = execute("relation-get %s" % r, die=True)[0].strip('\n')
        if result != "":
            results[r] = result
    return results

def relation_get_dict(relation_id=None, remote_unit=None):
    """Obtain all relation data as dict by way of JSON"""
@@ -152,18 +100,23 @@ def relation_get_dict(relation_id=None, remote_unit=None):
        settings[str(k)] = str(v)
    return settings


def set_admin_token(admin_token):
    """Set admin token according to deployment config or use a randomly
    generated token if none is specified (default).
    """
    if admin_token != 'None':
        juju_log('Configuring Keystone to use a pre-configured admin token.')
        utils.juju_log('INFO',
            'Configuring Keystone to use'
            ' a pre-configured admin token.')
        token = admin_token
    else:
        juju_log('Configuring Keystone to use a random admin token.')
        utils.juju_log('INFO',
            'Configuring Keystone to use a random admin token.')
        if os.path.isfile(stored_token):
            msg = 'Loading a previously generated admin token from %s' % stored_token
            juju_log(msg)
            msg = 'Loading a previously generated' \
                ' admin token from %s' % stored_token
            utils.juju_log('INFO', msg)
            f = open(stored_token, 'r')
            token = f.read().strip()
            f.close()
@@ -174,6 +127,7 @@ def set_admin_token(admin_token):
            out.close()
    update_config_block('DEFAULT', admin_token=token)


def get_admin_token():
    """Temporary utility to grab the admin token as configured in
    keystone.conf
@@ -188,6 +142,7 @@ def get_admin_token():
                keystone_conf)
    error_out('Could not find admin_token line in %s' % keystone_conf)


def update_config_block(section, **kwargs):
    """ Updates keystone.conf blocks given kwargs.
    Update a config setting in a specific setting of a config
@@ -209,6 +164,7 @@ def update_config_block(section, **kwargs):
    with open(conf_file, 'wb') as out:
        config.write(out)


def create_service_entry(service_name, service_type, service_desc, owner=None):
    """ Add a new service entry to keystone if one does not already exist """
    import manager
@@ -216,12 +172,15 @@ def create_service_entry(service_name, service_type, service_desc, owner=None):
                                      token=get_admin_token())
    for service in [s._info for s in manager.api.services.list()]:
        if service['name'] == service_name:
            juju_log("Service entry for '%s' already exists." % service_name)
            utils.juju_log('INFO',
                "Service entry for '%s' already exists." % \
                service_name)
            return
    manager.api.services.create(name=service_name,
                                service_type=service_type,
                                description=service_desc)
    juju_log("Created new service entry '%s'" % service_name)
    utils.juju_log('INFO', "Created new service entry '%s'" % service_name)


def create_endpoint_template(region, service, publicurl, adminurl,
                             internalurl):
@@ -233,7 +192,8 @@ def create_endpoint_template(region, service, publicurl, adminurl,
    service_id = manager.resolve_service_id(service)
    for ep in [e._info for e in manager.api.endpoints.list()]:
        if ep['service_id'] == service_id and ep['region'] == region:
            juju_log("Endpoint template already exists for '%s' in '%s'"
            utils.juju_log('INFO',
                "Endpoint template already exists for '%s' in '%s'"
                % (service, region))

            up_to_date = True
@@ -245,7 +205,9 @@ def create_endpoint_template(region, service, publicurl, adminurl,
                return
            else:
                # delete endpoint and recreate if endpoint urls need updating.
                juju_log("Updating endpoint template with new endpoint urls.")
                utils.juju_log('INFO',
                    "Updating endpoint template with"
                    " new endpoint urls.")
                manager.api.endpoints.delete(ep['id'])

    manager.api.endpoints.create(region=region,
@@ -253,9 +215,10 @@ def create_endpoint_template(region, service, publicurl, adminurl,
                                 publicurl=publicurl,
                                 adminurl=adminurl,
                                 internalurl=internalurl)
    juju_log("Created new endpoint template for '%s' in '%s'" %
    utils.juju_log('INFO', "Created new endpoint template for '%s' in '%s'" %
        (region, service))


def create_tenant(name):
    """ creates a tenant if it does not already exist """
    import manager
@@ -265,9 +228,10 @@ def create_tenant(name):
    if not tenants or name not in [t['name'] for t in tenants]:
        manager.api.tenants.create(tenant_name=name,
                                   description='Created by Juju')
        juju_log("Created new tenant: %s" % name)
        utils.juju_log('INFO', "Created new tenant: %s" % name)
        return
    juju_log("Tenant '%s' already exists." % name)
    utils.juju_log('INFO', "Tenant '%s' already exists." % name)


def create_user(name, password, tenant):
    """ creates a user if it doesn't already exist, as a member of tenant """
@@ -283,9 +247,11 @@ def create_user(name, password, tenant):
                             password=password,
                             email='juju@localhost',
                             tenant_id=tenant_id)
    juju_log("Created new user '%s' tenant: %s" % (name, tenant_id))
    utils.juju_log('INFO', "Created new user '%s' tenant: %s" % \
        (name, tenant_id))
    return
    juju_log("A user named '%s' already exists" % name)
    utils.juju_log('INFO', "A user named '%s' already exists" % name)


def create_role(name, user=None, tenant=None):
    """ creates a role if it doesn't already exist. grants role to user """
@@ -295,9 +261,9 @@ def create_role(name, user=None, tenant=None):
    roles = [r._info for r in manager.api.roles.list()]
    if not roles or name not in [r['name'] for r in roles]:
        manager.api.roles.create(name=name)
        juju_log("Created new role '%s'" % name)
        utils.juju_log('INFO', "Created new role '%s'" % name)
    else:
        juju_log("A role named '%s' already exists" % name)
        utils.juju_log('INFO', "A role named '%s' already exists" % name)

    if not user and not tenant:
        return
@@ -308,17 +274,18 @@ def create_role(name, user=None, tenant=None):
    tenant_id = manager.resolve_tenant_id(tenant)

    if None in [user_id, role_id, tenant_id]:
        error_out("Could not resolve [user_id, role_id, tenant_id]" %
            [user_id, role_id, tenant_id])
        error_out("Could not resolve [%s, %s, %s]" %
            (user_id, role_id, tenant_id))

    grant_role(user, name, tenant)


def grant_role(user, role, tenant):
    """grant user+tenant a specific role"""
    import manager
    manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
                                      token=get_admin_token())
    juju_log("Granting user '%s' role '%s' on tenant '%s'" %\
    utils.juju_log('INFO', "Granting user '%s' role '%s' on tenant '%s'" % \
        (user, role, tenant))
    user_id = manager.resolve_user_id(user)
    role_id = manager.resolve_role_id(role)
@@ -329,12 +296,14 @@ def grant_role(user, role, tenant):
        manager.api.roles.add_user_role(user=user_id,
                                        role=role_id,
                                        tenant=tenant_id)
        juju_log("Granted user '%s' role '%s' on tenant '%s'" %\
        utils.juju_log('INFO', "Granted user '%s' role '%s' on tenant '%s'" % \
            (user, role, tenant))
    else:
        juju_log("User '%s' already has role '%s' on tenant '%s'" %\
        utils.juju_log('INFO',
            "User '%s' already has role '%s' on tenant '%s'" % \
            (user, role, tenant))


def generate_admin_token(config):
    """ generate and add an admin token """
    import manager
@@ -345,12 +314,15 @@ def generate_admin_token(config):
        token = random.randrange(1000000000000, 9999999999999)
    else:
        return config["admin-token"]
    manager.api.add_token(token, config["admin-user"], "admin", config["token-expiry"])
    juju_log("Generated and added new random admin token.")
    manager.api.add_token(token, config["admin-user"],
                          "admin", config["token-expiry"])
    utils.juju_log('INFO', "Generated and added new random admin token.")
    return token


def ensure_initial_admin(config):
    """ Ensures the minimum admin stuff exists in whatever database we're using.
    """ Ensures the minimum admin stuff exists in whatever database we're
    using.
    This and the helper functions it calls are meant to be idempotent and
    run during install as well as during db-changed.  This will maintain
    the admin tenant, user, role, service entry and endpoint across every
@@ -365,10 +337,11 @@ def ensure_initial_admin(config):
    if config["admin-password"] != "None":
        passwd = config["admin-password"]
    elif os.path.isfile(stored_passwd):
        juju_log("Loading stored passwd from %s" % stored_passwd)
        utils.juju_log('INFO', "Loading stored passwd from %s" % stored_passwd)
        passwd = open(stored_passwd, 'r').readline().strip('\n')
    if passwd == "":
        juju_log("Generating new passwd for user: %s" % config["admin-user"])
        utils.juju_log('INFO', "Generating new passwd for user: %s" % \
            config["admin-user"])
        passwd = execute("pwgen -c 16 1", die=True)[0]
        open(stored_passwd, 'w+').writelines("%s\n" % passwd)

@@ -380,16 +353,7 @@ def ensure_initial_admin(config):
    create_role("KeystoneServiceAdmin", config["admin-user"], 'admin')
    create_service_entry("keystone", "identity", "Keystone Identity Service")

    if is_clustered():
        juju_log("Creating endpoint for clustered configuration")
        for region in config['region'].split():
            create_keystone_endpoint(service_host=config["vip"],
                                     service_port=int(config["service-port"]) + 1,
                                     auth_host=config["vip"],
                                     auth_port=int(config["admin-port"]) + 1,
                                     region=region)
    else:
        juju_log("Creating standard endpoint")
        utils.juju_log('INFO', "Creating standard endpoint")
        for region in config['region'].split():
            create_keystone_endpoint(service_host=config["hostname"],
                                     service_port=config["service-port"],
@@ -411,14 +375,16 @@ def update_user_password(username, password):
    import manager
    manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
                                      token=get_admin_token())
    juju_log("Updating password for user '%s'" % username)
    utils.juju_log('INFO', "Updating password for user '%s'" % username)

    user_id = manager.resolve_user_id(username)
    if user_id is None:
        error_out("Could not resolve user id for '%s'" % username)

    manager.api.users.update_password(user=user_id, password=password)
    juju_log("Successfully updated password for user '%s'" % username)
    utils.juju_log('INFO', "Successfully updated password for user '%s'" % \
        username)


def load_stored_passwords(path=SERVICE_PASSWD_PATH):
    creds = {}
@@ -431,10 +397,12 @@ def load_stored_passwords(path=SERVICE_PASSWD_PATH):
        creds[user] = passwd
    return creds


def save_stored_passwords(path=SERVICE_PASSWD_PATH, **creds):
    with open(path, 'wb') as stored_passwd:
        [stored_passwd.write('%s:%s\n' % (u, p)) for u, p in creds.iteritems()]


def get_service_password(service_username):
    creds = load_stored_passwords()
    if service_username in creds:
@@ -446,12 +414,13 @@ def get_service_password(service_username):

    return passwd


def configure_pki_tokens(config):
    '''Configure PKI token signing, if enabled.'''
    if config['enable-pki'] not in ['True', 'true']:
        update_config_block('signing', token_format='UUID')
    else:
        juju_log('TODO: PKI Support, setting to UUID for now.')
        utils.juju_log('INFO', 'TODO: PKI Support, setting to UUID for now.')
        update_config_block('signing', token_format='UUID')


@@ -462,10 +431,12 @@ def do_openstack_upgrade(install_src, packages):
    old_vers = get_os_codename_package('keystone')
    new_vers = get_os_codename_install_source(install_src)

    juju_log("Beginning Keystone upgrade: %s -> %s" % (old_vers, new_vers))
    utils.juju_log('INFO',
        "Beginning Keystone upgrade: %s -> %s" % \
        (old_vers, new_vers))

    # Backup previous config.
    juju_log("Backing up contents of /etc/keystone.")
    utils.juju_log('INFO', "Backing up contents of /etc/keystone.")
    stamp = time.strftime('%Y%m%d%H%M')
    cmd = 'tar -pcf /var/lib/juju/keystone-backup-%s.tar /etc/keystone' % stamp
    execute(cmd, die=True, echo=True)
@@ -482,14 +453,16 @@ def do_openstack_upgrade(install_src, packages):
    set_admin_token(config['admin-token'])

    # set the sql connection string if a shared-db relation is found.
    ids = relation_ids(relation_name='shared-db')
    ids = utils.relation_ids('shared-db')

    if ids:
        for id in ids:
            for unit in relation_list(id):
                juju_log('Configuring new keystone.conf for datbase access '\
                    'on existing database relation to %s' % unit)
                relation_data = relation_get_dict(relation_id=id,
        for rid in ids:
            for unit in utils.relation_list(rid):
                utils.juju_log('INFO',
                    'Configuring new keystone.conf for '
                    'database access on existing database'
                    ' relation to %s' % unit)
                relation_data = relation_get_dict(relation_id=rid,
                                                  remote_unit=unit)

                update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
@@ -498,66 +471,21 @@ def do_openstack_upgrade(install_src, packages):
                    relation_data["private-address"],
                    config["database"]))

    execute('service keystone stop', echo=True)
    if ((is_clustered() and is_leader()) or
        not is_clustered()):
        juju_log('Running database migrations for %s' % new_vers)
    utils.stop('keystone')
    if (cluster.eligible_leader(CLUSTER_RES)):
        utils.juju_log('INFO',
            'Running database migrations for %s' % new_vers)
        execute('keystone-manage db_sync', echo=True, die=True)
    else:
        juju_log('Not cluster leader; snoozing whilst leader upgrades DB')
        utils.juju_log('INFO',
            'Not cluster leader; snoozing whilst'
            ' leader upgrades DB')
        time.sleep(10)
    execute('service keystone start', echo=True)
    utils.start('keystone')
    time.sleep(5)
    juju_log('Completed Keystone upgrade: %s -> %s' % (old_vers, new_vers))


def is_clustered():
    for r_id in (relation_ids('ha') or []):
        for unit in (relation_list(r_id) or []):
            relation_data = \
                relation_get_dict(relation_id=r_id,
                                  remote_unit=unit)
            if 'clustered' in relation_data:
                return True
    return False


def is_leader():
    status = execute('crm resource show res_ks_vip', echo=True)[0].strip()
    hostname = execute('hostname', echo=True)[0].strip()
    if hostname in status:
        return True
    else:
        return False


def peer_units():
    peers = []
    for r_id in (relation_ids('cluster') or []):
        for unit in (relation_list(r_id) or []):
            peers.append(unit)
    return peers

def oldest_peer(peers):
    local_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    for peer in peers:
        remote_unit_no = peer.split('/')[1]
        if remote_unit_no < local_unit_no:
            return False
    return True


def eligible_leader():
    if is_clustered():
        if not is_leader():
            juju_log('Deferring action to CRM leader.')
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            juju_log('Deferring action to oldest service unit.')
            return False
    return True
    utils.juju_log('INFO',
        'Completed Keystone upgrade: '
        '%s -> %s' % (old_vers, new_vers))


def synchronize_service_credentials():
@@ -565,15 +493,17 @@ def synchronize_service_credentials():
    Broadcast service credentials to peers or consume those that have been
    broadcasted by peer, depending on hook context.
    '''
    if (not eligible_leader() or
    if (not cluster.eligible_leader(CLUSTER_RES) or
        not os.path.isfile(SERVICE_PASSWD_PATH)):
        return
    juju_log('Synchronizing service passwords to all peers.')
    utils.juju_log('INFO', 'Synchronizing service passwords to all peers.')
    unison.sync_to_peers(peer_interface='cluster',
                         paths=[SERVICE_PASSWD_PATH], user=SSH_USER,
                         verbose=True)

CA = []


def get_ca(user='keystone', group='keystone'):
    """
    Initialize a new CA object if one hasn't already been loaded.
@@ -34,6 +34,7 @@ swift_codenames = {
    '1.7.7': 'grizzly',
}


def juju_log(msg):
    subprocess.check_call(['juju-log', msg])

@@ -78,6 +79,7 @@ def get_os_codename_install_source(src):
        if v in src:
            return v


def get_os_codename_version(vers):
    '''Determine OpenStack codename from version number.'''
    try:
@@ -136,6 +138,7 @@ def get_os_version_package(pkg):
    e = "Could not determine OpenStack version for package: %s" % pkg
    error_out(e)


def configure_installation_source(rel):
    '''Configure apt installation source.'''

@@ -211,6 +214,7 @@ def configure_installation_source(rel):
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
HAPROXY_DEFAULT = '/etc/default/haproxy'


def configure_haproxy(units, service_ports, template_dir=None):
    template_dir = template_dir or 'templates'
    import jinja2
@@ -229,6 +233,7 @@ def configure_haproxy(units, service_ports, template_dir=None):
    with open(HAPROXY_DEFAULT, 'w') as f:
        f.write('ENABLED=1')


def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
@@ -1,6 +1,7 @@
#!/usr/bin/python
from keystoneclient.v2_0 import client


class KeystoneManager(object):
    def __init__(self, endpoint, token):
        self.api = client.Client(endpoint=endpoint, token=token)
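The KeystoneManager wrapper above is what the keystone_utils helpers (create_service_entry, create_endpoint_template, grant_role, and so on) drive. A small usage sketch, assuming it is run from the charm's hooks directory with python-keystoneclient installed; the endpoint matches the one used throughout this commit and the token value is a placeholder, not a real credential:

import manager

# same endpoint the helpers use; token would normally come from get_admin_token()
km = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
                             token='example-admin-token')

# mirror create_service_entry(): list services, create one only if missing
existing = [s._info['name'] for s in km.api.services.list()]
if 'keystone' not in existing:
    km.api.services.create(name='keystone',
                           service_type='identity',
                           description='Keystone Identity Service')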
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py
@@ -1 +1 @@
keystone-hooks
keystone_hooks.py