Reinstate a load of lost functionality

This commit is contained in:
James Page
2014-02-26 16:54:26 +00:00
parent c648f7d196
commit 0c0c5ff9c0
8 changed files with 85 additions and 51 deletions

View File

View File

@@ -29,6 +29,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
https,
is_clustered
)
from charmhelpers.contrib.hahelpers.apache import (
@@ -240,17 +241,19 @@ class CephContext(OSContextGenerator):
'''This generates context for /etc/ceph/ceph.conf templates'''
if not relation_ids('ceph'):
return {}
log('Generating template context for ceph')
mon_hosts = []
auth = None
key = None
use_syslog = str(config('use-syslog')).lower()
for rid in relation_ids('ceph'):
for unit in related_units(rid):
mon_hosts.append(relation_get('private-address', rid=rid,
unit=unit))
auth = relation_get('auth', rid=rid, unit=unit)
key = relation_get('key', rid=rid, unit=unit)
use_syslog = str(config('use-syslog')).lower()
ctxt = {
'mon_hosts': ' '.join(mon_hosts),
@@ -393,7 +396,7 @@ class ApacheSSLContext(OSContextGenerator):
return ctxt
class NeutronContext(object):
class NeutronContext(OSContextGenerator):
interfaces = []
@property
@@ -454,6 +457,22 @@ class NeutronContext(object):
return nvp_ctxt
def neutron_ctxt(self):
    """Build the base neutron template context.

    Scheme is https when the https() helper says SSL is in play;
    the endpoint host is the cluster VIP when clustered, otherwise
    this unit's private address.
    """
    scheme = 'https' if https() else 'http'
    host = config('vip') if is_clustered() else unit_get('private-address')
    return {
        'network_manager': self.network_manager,
        'neutron_url': '%s://%s:%s' % (scheme, host, '9292'),
    }
def __call__(self):
self._ensure_packages()
@@ -463,7 +482,7 @@ class NeutronContext(object):
if not self.plugin:
return {}
ctxt = {'network_manager': self.network_manager}
ctxt = self.neutron_ctxt()
if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt())

View File

@@ -2,6 +2,7 @@
import os
import sys
import time
from subprocess import check_call
@@ -21,11 +22,14 @@ from charmhelpers.core.hookenv import (
from charmhelpers.core.host import (
mkdir,
restart_on_change,
service_restart
service_restart,
service_stop,
service_start
)
from charmhelpers.fetch import (
apt_install, apt_update
apt_install, apt_update,
filter_installed_packages
)
from charmhelpers.contrib.openstack.utils import (
@@ -60,6 +64,7 @@ from charmhelpers.payload.execd import execd_preinstall
hooks = Hooks()
CONFIGS = register_configs()
@hooks.hook()
def install():
execd_preinstall()
@@ -67,21 +72,25 @@ def install():
apt_update()
apt_install(determine_packages(), fatal=True)
@hooks.hook('config-changed')
@restart_on_change(restart_map(), stopstart=True)
def config_changed():
unison.ensure_user(user=SSH_USER, group='juju_keystone')
unison.ensure_user(user=SSH_USER, group='keystone')
homedir = unison.get_homedir(SSH_USER)
if not os.path.isdir(homedir):
mkdir(homedir, SSH_USER, 'juju_keystone', 0775)
check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
mkdir(homedir, SSH_USER, 'keystone', 0775)
if openstack_upgrade_available('keystone'):
do_openstack_upgrade(configs=CONFIGS)
check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
save_script_rc()
configure_https()
CONFIGS.write_all()
service_restart('keystone')
time.sleep(10)
if eligible_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
@@ -89,9 +98,9 @@ def config_changed():
# HTTPS may have been set - so fire all identity relations
# again
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit)
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit)
@hooks.hook('shared-db-relation-joined')
@@ -106,12 +115,11 @@ def db_joined():
def db_changed():
if 'shared-db' not in CONFIGS.complete_contexts():
log('shared-db relation incomplete. Peer not ready?')
return
CONFIGS.write(KEYSTONE_CONF)
service_restart('keystone')
if eligible_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
else:
CONFIGS.write(KEYSTONE_CONF)
if eligible_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
@hooks.hook('identity-service-relation-joined')
@@ -121,16 +129,12 @@ def identity_joined():
@hooks.hook('identity-service-relation-changed')
@restart_on_change(restart_map())
def identity_changed():
if not eligible_leader(CLUSTER_RES):
log('Deferring identity_changed() to service leader.')
#if 'identity-service' not in CONFIGS.complete_contexts():
# return
def identity_changed(relation_id=None, remote_unit=None):
if eligible_leader(CLUSTER_RES):
add_service_to_keystone()
add_service_to_keystone(relation_id, remote_unit)
synchronize_service_credentials()
else:
log('Deferring identity_changed() to service leader.')
@hooks.hook('cluster-relation-joined')
@@ -146,7 +150,7 @@ def cluster_joined():
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
unison.ssh_authorized_peers(user=SSH_USER,
group='juju_keystone',
group='keystone',
peer_interface='cluster',
ensure_local_user=True)
synchronize_service_credentials()
@@ -183,18 +187,15 @@ def ha_joined():
@hooks.hook('ha-relation-changed')
def ha_changed():
clustered = relation_get('clustered')
if not clustered or clustered in [None, 'None', '']:
log('ha_changed: hacluster subordinate not fully clustered.')
return
if not is_leader(CLUSTER_RES):
log('ha_changed: hacluster complete but we are not leader.')
return
ensure_initial_admin(config)
log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
for rid in relation_ids('identity-service'):
identity_joined(rid=rid)
CONFIGS.write_all()
if (clustered is not None and
is_leader(CLUSTER_RES)):
ensure_initial_admin(config)
log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
for rid in relation_ids('identity-service'):
relation_set(rid=rid,
auth_host=config('vip'),
service_host=config('vip'))
def configure_https():
@@ -212,16 +213,16 @@ def configure_https():
cmd = ['a2dissite', 'openstack_https_frontend']
check_call(cmd)
for rid in relation_ids('identity-service'):
identity_joined(rid=rid)
@hooks.hook('upgrade-charm')
@restart_on_change(restart_map(), stopstart=True)
def upgrade_charm():
if openstack_upgrade_available('keystone'):
do_openstack_upgrade(configs=CONFIGS)
save_script_rc()
configure_https()
apt_install(filter_installed_packages(determine_packages()))
cluster_changed()
if eligible_leader(CLUSTER_RES):
log('Cluster leader - ensuring endpoint configuration'
' is up to date')
ensure_initial_admin(config)
CONFIGS.write_all()

View File

@@ -2,6 +2,7 @@
import subprocess
import os
import urlparse
import time
from base64 import b64encode
from collections import OrderedDict
@@ -39,6 +40,11 @@ from charmhelpers.fetch import (
apt_update,
)
from charmhelpers.core.host import (
service_stop,
service_start,
)
import keystone_context
import keystone_ssl as ssl
@@ -155,6 +161,7 @@ valid_services = {
}
}
def resource_map():
'''
Dynamically generate a map of resources that will be managed for a single
@@ -233,11 +240,15 @@ def do_openstack_upgrade(configs):
if eligible_leader(CLUSTER_RES):
migrate_database()
def migrate_database():
    """Initialize or migrate the keystone database with keystone-manage.

    The keystone service is stopped for the duration of the db_sync and
    restarted afterwards; a short sleep gives the API time to come back up.
    """
    log('Migrating the keystone database.', level=INFO)
    service_stop('keystone')
    subprocess.check_output(['keystone-manage', 'db_sync'])
    service_start('keystone')
    time.sleep(10)
## OLD
@@ -578,8 +589,9 @@ def relation_list(rid):
else:
return result
def add_service_to_keystone():
settings = relation_get()
def add_service_to_keystone(relation_id=None, remote_unit=None):
settings = relation_get(rid=relation_id, unit=remote_unit)
# the minimum settings needed per endpoint
single = set(['service', 'region', 'public_url', 'admin_url',
'internal_url'])
@@ -610,7 +622,8 @@ def add_service_to_keystone():
for role in get_requested_roles(settings):
log("Creating requested role: %s" % role)
create_role(role)
relation_set(**relation_data)
relation_set(relation_id=relation_id,
**relation_data)
return
else:
ensure_valid_service(settings['service'])
@@ -724,7 +737,8 @@ def add_service_to_keystone():
if is_clustered():
unison.sync_to_peers(peer_interface='cluster',
paths=[SSL_DIR], user=SSH_USER, verbose=True)
relation_set(**relation_data)
relation_set(relation_id=relation_id,
**relation_data)
def ensure_valid_service(service):