Re-instate a load of lost function
hooks/charmhelpers/contrib/__init__.py            (new empty file)
hooks/charmhelpers/contrib/hahelpers/__init__.py  (new empty file)
hooks/charmhelpers/contrib/openstack/__init__.py  (new empty file)
@@ -29,6 +29,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
     determine_apache_port,
     determine_api_port,
     https,
+    is_clustered
 )
 
 from charmhelpers.contrib.hahelpers.apache import (
@@ -240,17 +241,19 @@ class CephContext(OSContextGenerator):
         '''This generates context for /etc/ceph/ceph.conf templates'''
         if not relation_ids('ceph'):
             return {}
 
         log('Generating template context for ceph')
 
         mon_hosts = []
         auth = None
         key = None
+        use_syslog = str(config('use-syslog')).lower()
         for rid in relation_ids('ceph'):
             for unit in related_units(rid):
                 mon_hosts.append(relation_get('private-address', rid=rid,
                                               unit=unit))
                 auth = relation_get('auth', rid=rid, unit=unit)
                 key = relation_get('key', rid=rid, unit=unit)
-                use_syslog = str(config('use-syslog')).lower()
 
         ctxt = {
             'mon_hosts': ' '.join(mon_hosts),
@@ -393,7 +396,7 @@ class ApacheSSLContext(OSContextGenerator):
         return ctxt
 
 
-class NeutronContext(object):
+class NeutronContext(OSContextGenerator):
     interfaces = []
 
     @property
@@ -454,6 +457,22 @@ class NeutronContext(object):
 
         return nvp_ctxt
 
+    def neutron_ctxt(self):
+        if https():
+            proto = 'https'
+        else:
+            proto = 'http'
+        if is_clustered():
+            host = config('vip')
+        else:
+            host = unit_get('private-address')
+        url = '%s://%s:%s' % (proto, host, '9292')
+        ctxt = {
+            'network_manager': self.network_manager,
+            'neutron_url': url,
+        }
+        return ctxt
+
     def __call__(self):
         self._ensure_packages()
 
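The new neutron_ctxt() method above picks the scheme via https() and the host via is_clustered()/config('vip'), falling back to the unit's private address. A minimal standalone sketch of just that selection logic (the function and argument names are hypothetical stand-ins for the charm helpers https(), is_clustered(), config('vip') and unit_get('private-address'); the port mirrors the '9292' used in the hunk):

# Hypothetical sketch of the endpoint selection performed by neutron_ctxt().
def build_neutron_url(use_https, clustered, vip, private_address, port='9292'):
    proto = 'https' if use_https else 'http'
    host = vip if clustered else private_address
    return '%s://%s:%s' % (proto, host, port)

# e.g. build_neutron_url(True, True, '10.0.0.100', '10.0.0.5')
#      -> 'https://10.0.0.100:9292'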
@@ -463,7 +482,7 @@ class NeutronContext(object):
         if not self.plugin:
             return {}
 
-        ctxt = {'network_manager': self.network_manager}
+        ctxt = self.neutron_ctxt()
 
         if self.plugin == 'ovs':
             ctxt.update(self.ovs_ctxt())
hooks/charmhelpers/contrib/storage/__init__.py    (new empty file)
@@ -2,6 +2,7 @@
 
 import os
 import sys
+import time
 
 from subprocess import check_call
 
@@ -21,11 +22,14 @@ from charmhelpers.core.hookenv import (
 from charmhelpers.core.host import (
     mkdir,
     restart_on_change,
-    service_restart
+    service_restart,
+    service_stop,
+    service_start
 )
 
 from charmhelpers.fetch import (
-    apt_install, apt_update
+    apt_install, apt_update,
+    filter_installed_packages
 )
 
 from charmhelpers.contrib.openstack.utils import (
@@ -60,6 +64,7 @@ from charmhelpers.payload.execd import execd_preinstall
 hooks = Hooks()
 CONFIGS = register_configs()
 
+
 @hooks.hook()
 def install():
     execd_preinstall()
@@ -67,21 +72,25 @@ def install():
     apt_update()
     apt_install(determine_packages(), fatal=True)
 
 
 @hooks.hook('config-changed')
-@restart_on_change(restart_map(), stopstart=True)
 def config_changed():
-    unison.ensure_user(user=SSH_USER, group='juju_keystone')
+    unison.ensure_user(user=SSH_USER, group='keystone')
     homedir = unison.get_homedir(SSH_USER)
     if not os.path.isdir(homedir):
-        mkdir(homedir, SSH_USER, 'juju_keystone', 0775)
-        check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
+        mkdir(homedir, SSH_USER, 'keystone', 0775)
 
     if openstack_upgrade_available('keystone'):
         do_openstack_upgrade(configs=CONFIGS)
-        check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
+
+    check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
 
     save_script_rc()
     configure_https()
     CONFIGS.write_all()
     service_restart('keystone')
+    time.sleep(10)
 
     if eligible_leader(CLUSTER_RES):
         migrate_database()
         ensure_initial_admin(config)
@@ -89,9 +98,9 @@ def config_changed():
     # HTTPS may have been set - so fire all identity relations
     # again
     for r_id in relation_ids('identity-service'):
         for unit in relation_list(r_id):
             identity_changed(relation_id=r_id,
                              remote_unit=unit)
 
 
 @hooks.hook('shared-db-relation-joined')
@@ -106,12 +115,11 @@ def db_joined():
 def db_changed():
     if 'shared-db' not in CONFIGS.complete_contexts():
         log('shared-db relation incomplete. Peer not ready?')
-        return
-    CONFIGS.write(KEYSTONE_CONF)
-    service_restart('keystone')
-    if eligible_leader(CLUSTER_RES):
-        migrate_database()
-        ensure_initial_admin(config)
+    else:
+        CONFIGS.write(KEYSTONE_CONF)
+        if eligible_leader(CLUSTER_RES):
+            migrate_database()
+            ensure_initial_admin(config)
 
 
 @hooks.hook('identity-service-relation-joined')
@@ -121,16 +129,12 @@ def identity_joined():
 
 
 @hooks.hook('identity-service-relation-changed')
-@restart_on_change(restart_map())
-def identity_changed():
-    if not eligible_leader(CLUSTER_RES):
-        log('Deferring identity_changed() to service leader.')
-    #if 'identity-service' not in CONFIGS.complete_contexts():
-    #    return
+def identity_changed(relation_id=None, remote_unit=None):
     if eligible_leader(CLUSTER_RES):
-        add_service_to_keystone()
+        add_service_to_keystone(relation_id, remote_unit)
         synchronize_service_credentials()
+    else:
+        log('Deferring identity_changed() to service leader.')
 
 
 @hooks.hook('cluster-relation-joined')
@@ -146,7 +150,7 @@ def cluster_joined():
 @restart_on_change(restart_map(), stopstart=True)
 def cluster_changed():
     unison.ssh_authorized_peers(user=SSH_USER,
-                                group='juju_keystone',
+                                group='keystone',
                                 peer_interface='cluster',
                                 ensure_local_user=True)
     synchronize_service_credentials()
@@ -183,18 +187,15 @@ def ha_joined():
 @hooks.hook('ha-relation-changed')
 def ha_changed():
     clustered = relation_get('clustered')
-    if not clustered or clustered in [None, 'None', '']:
-        log('ha_changed: hacluster subordinate not fully clustered.')
-        return
-    if not is_leader(CLUSTER_RES):
-        log('ha_changed: hacluster complete but we are not leader.')
-        return
-    ensure_initial_admin(config)
-    log('Cluster configured, notifying other services and updating '
-        'keystone endpoint configuration')
-    for rid in relation_ids('identity-service'):
-        identity_joined(rid=rid)
-    CONFIGS.write_all()
+    if (clustered is not None and
+            is_leader(CLUSTER_RES)):
+        ensure_initial_admin(config)
+        log('Cluster configured, notifying other services and updating '
+            'keystone endpoint configuration')
+        for rid in relation_ids('identity-service'):
+            relation_set(rid=rid,
+                         auth_host=config('vip'),
+                         service_host=config('vip'))
 
 
 def configure_https():
@@ -212,16 +213,16 @@ def configure_https():
         cmd = ['a2dissite', 'openstack_https_frontend']
         check_call(cmd)
 
-    for rid in relation_ids('identity-service'):
-        identity_joined(rid=rid)
 
 
 @hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
 def upgrade_charm():
-    if openstack_upgrade_available('keystone'):
-        do_openstack_upgrade(configs=CONFIGS)
-    save_script_rc()
-    configure_https()
+    apt_install(filter_installed_packages(determine_packages()))
+    cluster_changed()
+    if eligible_leader(CLUSTER_RES):
+        log('Cluster leader - ensuring endpoint configuration'
+            ' is up to date')
+        ensure_initial_admin(config)
     CONFIGS.write_all()
 
@@ -2,6 +2,7 @@
 import subprocess
 import os
 import urlparse
+import time
 
 from base64 import b64encode
 from collections import OrderedDict
@@ -39,6 +40,11 @@ from charmhelpers.fetch import (
     apt_update,
 )
 
+from charmhelpers.core.host import (
+    service_stop,
+    service_start,
+)
+
 import keystone_context
 import keystone_ssl as ssl
 
@@ -155,6 +161,7 @@ valid_services = {
     }
 }
 
+
 def resource_map():
     '''
     Dynamically generate a map of resources that will be managed for a single
@@ -233,11 +240,15 @@ def do_openstack_upgrade(configs):
     if eligible_leader(CLUSTER_RES):
         migrate_database()
 
 
 def migrate_database():
     '''Runs keystone-manage to initialize a new database or migrate existing'''
     log('Migrating the keystone database.', level=INFO)
+    service_stop('keystone')
     cmd = ['keystone-manage', 'db_sync']
     subprocess.check_output(cmd)
+    service_start('keystone')
+    time.sleep(10)
+
 
 ## OLD
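The migrate_database() hunk wraps the existing db_sync call in a service stop/start plus a short settle delay. Pulled together with the imports added earlier in this file, the resulting helper looks roughly like the sketch below (a reconstruction from the hunk, not authoritative):

import subprocess
import time

from charmhelpers.core.hookenv import log, INFO
from charmhelpers.core.host import service_stop, service_start

def migrate_database():
    '''Runs keystone-manage to initialize a new database or migrate existing'''
    log('Migrating the keystone database.', level=INFO)
    service_stop('keystone')    # quiesce keystone before the schema sync
    subprocess.check_output(['keystone-manage', 'db_sync'])
    service_start('keystone')   # bring it back and give it a moment to settle
    time.sleep(10)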
@@ -578,8 +589,9 @@ def relation_list(rid):
     else:
         return result
 
-def add_service_to_keystone():
-    settings = relation_get()
+
+def add_service_to_keystone(relation_id=None, remote_unit=None):
+    settings = relation_get(rid=relation_id, unit=remote_unit)
     # the minimum settings needed per endpoint
     single = set(['service', 'region', 'public_url', 'admin_url',
                   'internal_url'])
@@ -610,7 +622,8 @@ def add_service_to_keystone():
         for role in get_requested_roles(settings):
             log("Creating requested role: %s" % role)
             create_role(role)
-        relation_set(**relation_data)
+        relation_set(relation_id=relation_id,
+                     **relation_data)
         return
     else:
         ensure_valid_service(settings['service'])
@@ -724,7 +737,8 @@ def add_service_to_keystone():
     if is_clustered():
         unison.sync_to_peers(peer_interface='cluster',
                              paths=[SSL_DIR], user=SSH_USER, verbose=True)
-    relation_set(**relation_data)
+    relation_set(relation_id=relation_id,
+                 **relation_data)
 
 
 def ensure_valid_service(service):