Fixed a few race issues and switched to using decorators

Author: Edward Hope-Morley
Date: 2015-01-10 14:56:22 +00:00
parent 55e80e354a
commit a9c1e56d09
3 changed files with 346 additions and 140 deletions
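For context, the core of the change is a decorator that hashes the SSL certificate directories before and after the wrapped hook runs and only triggers a peer sync when the contents differ (with an optional forced mode). Below is a minimal, self-contained sketch of that pattern, not the charm's actual API: the names _tree_digest, sync_if_changed and the sync callback are illustrative, whereas the real code hashes SSL_DIR, APACHE_SSL_DIR and CA_CERT_PATH, guards against nested syncs with a semaphore, and calls synchronize_ca().

import functools
import hashlib
import os


def _tree_digest(paths):
    # Illustrative helper: a single sha256 digest over every file under paths.
    digest = hashlib.sha256()
    for root in paths:
        for dirpath, _dirnames, filenames in os.walk(root):
            for name in sorted(filenames):
                with open(os.path.join(dirpath, name), 'rb') as fd:
                    digest.update(fd.read())
    return digest.hexdigest()


def sync_if_changed(paths, sync, force=False):
    # Illustrative decorator factory: call sync() after f() only when files
    # under 'paths' changed while f() ran, or unconditionally if force=True.
    def wrap(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            before = None if force else _tree_digest(paths)
            ret = f(*args, **kwargs)
            if force or _tree_digest(paths) != before:
                sync()
            return ret
        return wrapper
    return wrap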

View File

@@ -17,6 +17,7 @@ from charmhelpers.core.hookenv import (
is_relation_made,
log,
local_unit,
DEBUG,
INFO,
WARNING,
ERROR,
@@ -24,6 +25,7 @@ from charmhelpers.core.hookenv import (
relation_ids,
relation_set,
related_units,
remote_unit,
unit_get,
)
@@ -50,9 +52,8 @@ from keystone_utils import (
ensure_initial_admin,
migrate_database,
save_script_rc,
synchronize_ca,
synchronize_ca_if_changed,
register_configs,
relation_list,
restart_map,
CLUSTER_RES,
KEYSTONE_CONF,
@@ -63,12 +64,13 @@ from keystone_utils import (
check_peer_actions,
CA_CERT_PATH,
ensure_permissions,
is_pending_clustered,
is_ssl_cert_master,
)
from charmhelpers.contrib.hahelpers.cluster import (
is_elected_leader,
get_hacluster_config,
peer_units,
)
from charmhelpers.payload.execd import execd_preinstall
@@ -99,12 +101,14 @@ def install():
@hooks.hook('config-changed')
@restart_on_change(restart_map())
@synchronize_ca_if_changed(fatal=False)
def config_changed():
if config('prefer-ipv6'):
setup_ipv6()
sync_db_with_multi_ipv6_addresses(config('database'),
config('database-user'))
unison.ensure_user(user=SSH_USER, group='juju_keystone')
unison.ensure_user(user=SSH_USER, group='keystone')
homedir = unison.get_homedir(SSH_USER)
if not os.path.isdir(homedir):
@@ -127,20 +131,10 @@ def config_changed():
configure_https()
CONFIGS.write_all()
if is_elected_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
log('Firing identity_changed hook for all related services.')
# HTTPS may have been set - so fire all identity relations
# again
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit, sync_certs=False)
synchronize_ca()
[cluster_joined(rid) for rid in relation_ids('cluster')]
# Update relations since SSL may have been configured. If we have peer
# units we can rely on the sync to do this in cluster relation.
if is_elected_leader(CLUSTER_RES) and not peer_units():
update_all_identity_relation_units()
@hooks.hook('shared-db-relation-joined')
@@ -173,8 +167,23 @@ def pgsql_db_joined():
relation_set(database=config('database'))
def update_all_identity_relation_units():
try:
migrate_database()
except Exception as exc:
log("Database initialisation failed (%s) - db not ready?" % (exc),
level=WARNING)
else:
ensure_initial_admin(config)
log('Firing identity_changed hook for all related services.')
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
identity_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('shared-db-relation-changed')
@restart_on_change(restart_map())
@synchronize_ca_if_changed()
def db_changed():
if 'shared-db' not in CONFIGS.complete_contexts():
log('shared-db relation incomplete. Peer not ready?')
@@ -189,34 +198,28 @@ def db_changed():
if allowed_units and local_unit() not in allowed_units.split():
log('Allowed_units list provided and this unit not present')
return
migrate_database()
ensure_initial_admin(config)
# Ensure any existing service entries are updated in the
# new database backend
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
identity_changed(relation_id=rid, remote_unit=unit)
update_all_identity_relation_units()
@hooks.hook('pgsql-db-relation-changed')
@restart_on_change(restart_map())
@synchronize_ca_if_changed()
def pgsql_db_changed():
if 'pgsql-db' not in CONFIGS.complete_contexts():
log('pgsql-db relation incomplete. Peer not ready?')
else:
CONFIGS.write(KEYSTONE_CONF)
if is_elected_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
# Ensure any existing service entries are updated in the
# new database backend
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
identity_changed(relation_id=rid, remote_unit=unit)
update_all_identity_relation_units()
@hooks.hook('identity-service-relation-changed')
def identity_changed(relation_id=None, remote_unit=None, sync_certs=True):
@synchronize_ca_if_changed()
def identity_changed(relation_id=None, remote_unit=None):
notifications = {}
if is_elected_leader(CLUSTER_RES):
# Catch database not configured error and defer until db ready
@@ -235,8 +238,6 @@ def identity_changed(relation_id=None, remote_unit=None, sync_certs=True):
raise
CONFIGS.write_all()
if sync_certs:
synchronize_ca()
settings = relation_get(rid=relation_id, unit=remote_unit)
service = settings.get('service', None)
@@ -286,33 +287,55 @@ def cluster_joined(relation_id=None):
relation_settings={'private-address': private_addr})
@synchronize_ca_if_changed()
def identity_updates_with_ssl_sync():
CONFIGS.write_all()
update_all_identity_relation_units()
@synchronize_ca_if_changed(force=True)
def identity_updates_with_forced_ssl_sync():
identity_updates_with_ssl_sync()
@hooks.hook('cluster-relation-changed',
'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
check_peer_actions()
# Uncomment the following to print out all cluster relation settings in
# log (debug only).
"""
rels = ["%s:%s" % (k, v) for k, v in relation_get().iteritems()]
tag = '\n[debug:%s]' % (remote_unit())
log("PEER RELATION SETTINGS (unit=%s): %s" % (remote_unit(),
tag.join(rels)),
level=DEBUG)
"""
# NOTE(jamespage) re-echo passwords for peer storage
echo_whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master']
echo_whitelist = ['_passwd', 'identity-service:']
unison.ssh_authorized_peers(user=SSH_USER,
group='keystone',
peer_interface='cluster',
ensure_local_user=True)
CONFIGS.write_all()
# If we have a pending cluster formation, defer following actions to the ha
# relation hook instead.
if is_pending_clustered():
log("Waiting for ha to be 'clustered' - deferring identity-updates "
"and cert sync to ha relation", level=INFO)
return
synced_units = relation_get(attribute='ssl-synced-units',
unit=local_unit())
if not synced_units or (remote_unit() not in synced_units):
log("Peer '%s' not in list of synced units (%s)" %
(remote_unit(), synced_units), level=INFO)
identity_updates_with_forced_ssl_sync()
else:
identity_updates_with_ssl_sync()
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id, remote_unit=unit,
sync_certs=False)
# If we are cert master ignore what other peers have to say
if not is_ssl_cert_master():
echo_whitelist.append('ssl-cert-master')
synchronize_ca()
# ssl cert sync must be done BEFORE this to reduce the risk of feedback
# loops in cluster relation
peer_echo(includes=echo_whitelist)
@@ -367,20 +390,16 @@ def ha_joined():
@hooks.hook('ha-relation-changed')
@restart_on_change(restart_map())
@synchronize_ca_if_changed()
def ha_changed():
clustered = relation_get('clustered')
CONFIGS.write_all()
if clustered is not None and is_elected_leader(CLUSTER_RES):
if clustered and is_elected_leader(CLUSTER_RES):
ensure_initial_admin(config)
log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
identity_changed(relation_id=rid, remote_unit=unit,
sync_certs=False)
synchronize_ca()
update_all_identity_relation_units()
@hooks.hook('identity-admin-relation-changed')
@@ -399,6 +418,7 @@ def admin_relation_changed():
relation_set(**relation_data)
@synchronize_ca_if_changed()
def configure_https():
'''
Enables SSL API Apache config if appropriate and kicks identity-service
@@ -417,6 +437,7 @@ def configure_https():
@hooks.hook('upgrade-charm')
@restart_on_change(restart_map(), stopstart=True)
@synchronize_ca_if_changed()
def upgrade_charm():
apt_install(filter_installed_packages(determine_packages()))
unison.ssh_authorized_peers(user=SSH_USER,
@@ -424,17 +445,12 @@ def upgrade_charm():
peer_interface='cluster',
ensure_local_user=True)
if is_elected_leader(CLUSTER_RES):
log('Cluster leader - ensuring endpoint configuration'
' is up to date')
log('Cluster leader - ensuring endpoint configuration is up to '
'date', level=DEBUG)
time.sleep(10)
ensure_initial_admin(config)
# Deal with interface changes for icehouse
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit, sync_certs=False)
update_all_identity_relation_units()
CONFIGS.write_all()
synchronize_ca()
def main():

View File

@@ -1,13 +1,15 @@
#!/usr/bin/python
import glob
import grp
import subprocess
import hashlib
import os
import pwd
import uuid
import urlparse
import shutil
import subprocess
import threading
import time
import urlparse
import uuid
from base64 import b64encode
from collections import OrderedDict
@@ -56,10 +58,10 @@ from charmhelpers.core.hookenv import (
config,
log,
local_unit,
related_units,
relation_get,
relation_set,
relation_ids,
unit_get,
DEBUG,
INFO,
WARNING,
@@ -130,6 +132,7 @@ SSL_DIR = '/var/lib/keystone/juju_ssl/'
SSL_CA_NAME = 'Ubuntu Cloud'
CLUSTER_RES = 'grp_ks_vips'
SSH_USER = 'juju_keystone'
SSL_SYNC_SEMAPHORE = threading.Semaphore()
BASE_RESOURCE_MAP = OrderedDict([
(KEYSTONE_CONF, {
@@ -701,7 +704,7 @@ def is_ssl_cert_master():
master = relation_get(attribute='ssl-cert-master', rid=rid,
unit=local_unit())
if master and master == unit_get('private-address'):
if master and master == local_unit():
return True
return False
@@ -720,6 +723,39 @@ def unison_sync(paths_to_sync):
fatal=True)
def is_sync_master():
if not peer_units():
log("Not syncing certs since there are no peer units.", level=INFO)
return False
# If no ssl master elected and we are cluster leader, elect this unit.
if is_elected_leader(CLUSTER_RES):
master = []
for rid in relation_ids('cluster'):
for unit in related_units(rid):
m = relation_get(rid=rid, unit=unit,
attribute='ssl-cert-master')
if m is not None:
master.append(m)
master = set(master)
if not master or ('unknown' in master and len(master) == 1):
log("Electing this unit as ssl-cert-master", level=DEBUG)
for rid in relation_ids('cluster'):
settings = {'ssl-cert-master': local_unit(),
'ssl-synced-units': None}
relation_set(relation_id=rid, relation_settings=settings)
# Return now and wait for cluster-relation-changed for sync.
return False
if not is_ssl_cert_master():
log("Not ssl cert master - skipping sync", level=INFO)
return False
return True
def synchronize_ca(fatal=True):
"""Broadcast service credentials to peers.
@@ -733,27 +769,6 @@ def synchronize_ca(fatal=True):
"""
paths_to_sync = [SYNC_FLAGS_DIR]
if not peer_units():
log("Not syncing certs since there are no peer units.", level=INFO)
return
# If no ssl master elected and we are cluster leader, elect this unit.
if is_elected_leader(CLUSTER_RES):
master = relation_get(attribute='ssl-cert-master')
if not master or master == 'unknown':
log("Electing this unit as ssl-cert-master", level=DEBUG)
for rid in relation_ids('cluster'):
relation_set(relation_id=rid,
relation_settings={'ssl-cert-master':
unit_get('private-address'),
'trigger': str(uuid.uuid4())})
# Return now and wait for echo before continuing.
return
if not is_ssl_cert_master():
log("Not ssl cert master - skipping sync", level=INFO)
return
if str_is_true(config('https-service-endpoints')):
log("Syncing all endpoint certs since https-service-endpoints=True",
level=DEBUG)
@@ -785,7 +800,6 @@ def synchronize_ca(fatal=True):
# new ssl keys.
create_peer_service_actions('restart', ['apache2'])
create_service_action('update-ca-certificates')
try:
unison_sync(paths_to_sync)
except:
@@ -795,23 +809,94 @@ def synchronize_ca(fatal=True):
log("Sync failed but fatal=False", level=INFO)
return
trigger = str(uuid.uuid4())
log("Sending restart-services-trigger=%s to all peers" % (trigger),
level=DEBUG)
settings = {'restart-services-trigger': trigger}
# Cleanup
shutil.rmtree(SYNC_FLAGS_DIR)
mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775)
# If we are the sync master but no longer leader then re-elect master.
if not is_elected_leader(CLUSTER_RES):
log("Re-electing ssl cert master.", level=INFO)
settings['ssl-cert-master'] = 'unknown'
trigger = str(uuid.uuid4())
log("Sending restart-services-trigger=%s to all peers" % (trigger),
level=DEBUG)
log("Sync complete - sending peer info", level=DEBUG)
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, **settings)
log("Sync complete", level=DEBUG)
return {'restart-services-trigger': trigger,
'ssl-synced-units': peer_units()}
def update_hash_from_path(hash, path, recurse_depth=10):
"""Recurse through path and update the provided hash for every file found.
"""
if not recurse_depth:
log("Max recursion depth (%s) reached for update_hash_from_path() at "
"path='%s' - not going any deeper" % (recurse_depth, path),
level=WARNING)
return
for p in glob.glob("%s/*" % path):
if os.path.isdir(p):
update_hash_from_path(hash, p, recurse_depth=recurse_depth - 1)
else:
with open(p, 'r') as fd:
hash.update(fd.read())
def synchronize_ca_if_changed(force=False, fatal=True):
"""Decorator to perform ssl cert sync if decorated function modifies them
in any way.
If force is True a sync is done regardless.
"""
def inner_synchronize_ca_if_changed1(f):
def inner_synchronize_ca_if_changed2(*args, **kwargs):
if not is_sync_master():
return f(*args, **kwargs)
peer_settings = {}
try:
# Ensure we don't do a double sync if we are nested.
if not force and SSL_SYNC_SEMAPHORE.acquire(blocking=0):
hash1 = hashlib.sha256()
for path in [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]:
update_hash_from_path(hash1, path)
hash1 = hash1.hexdigest()
ret = f(*args, **kwargs)
hash2 = hashlib.sha256()
for path in [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]:
update_hash_from_path(hash2, path)
hash2 = hash2.hexdigest()
if hash1 != hash2:
log("SSL certs have changed - syncing peers",
level=DEBUG)
peer_settings = synchronize_ca(fatal=fatal)
else:
log("SSL certs have not changed - skipping sync",
level=DEBUG)
else:
ret = f(*args, **kwargs)
if force:
log("Doing forced ssl cert sync", level=DEBUG)
peer_settings = synchronize_ca(fatal=fatal)
# If we are the sync master but no longer leader then re-elect
# master.
if not is_elected_leader(CLUSTER_RES):
log("Re-electing ssl cert master.", level=INFO)
peer_settings['ssl-cert-master'] = 'unknown'
for rid in relation_ids('cluster'):
relation_set(relation_id=rid,
relation_settings=peer_settings)
return ret
finally:
SSL_SYNC_SEMAPHORE.release()
return inner_synchronize_ca_if_changed2
return inner_synchronize_ca_if_changed1
def get_ca(user='keystone', group='keystone'):
@@ -838,8 +923,11 @@ def get_ca(user='keystone', group='keystone'):
# Tag this host as the ssl cert master.
if is_clustered() or peer_units():
peer_store(key='ssl-cert-master',
value=unit_get('private-address'))
for rid in relation_ids('cluster'):
relation_set(relation_id=rid,
relation_settings={'ssl-cert-master':
local_unit(),
'synced-units': None})
ssl.CA_SINGLETON.append(ca)
@@ -1149,13 +1237,3 @@ def send_notifications(data, force=False):
level=DEBUG)
for rid in rel_ids:
relation_set(relation_id=rid, relation_settings=_notifications)
def is_pending_clustered():
"""If we have HA relations but are not yet 'clustered' return True."""
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
if not relation_get('clustered', rid=r_id, unit=unit):
return True
return False

View File

@@ -1,6 +1,7 @@
from mock import call, patch, MagicMock
import os
import json
import uuid
from test_utils import CharmTestCase
@@ -34,6 +35,7 @@ TO_PATCH = [
'relation_set',
'relation_get',
'related_units',
'remote_unit',
'unit_get',
'peer_echo',
# charmhelpers.core.host
@@ -54,7 +56,7 @@ TO_PATCH = [
'migrate_database',
'ensure_initial_admin',
'add_service_to_keystone',
'synchronize_ca',
'synchronize_ca_if_changed',
# other
'check_call',
'execd_preinstall',
@@ -158,8 +160,12 @@ class KeystoneRelationTests(CharmTestCase):
'Attempting to associate a postgresql database when there '
'is already associated a mysql one')
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch.object(hooks, 'CONFIGS')
def test_db_changed_missing_relation_data(self, configs):
def test_db_changed_missing_relation_data(self, configs, mock_peer_units,
mock_log):
mock_peer_units.return_value = None
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.db_changed()
@@ -190,9 +196,13 @@ class KeystoneRelationTests(CharmTestCase):
configs.write = MagicMock()
hooks.pgsql_db_changed()
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
def test_db_changed_allowed(self, identity_changed, configs):
def test_db_changed_allowed(self, identity_changed, configs,
mock_peer_units, mock_log):
mock_peer_units.return_value = None
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
@@ -205,9 +215,13 @@ class KeystoneRelationTests(CharmTestCase):
relation_id='identity-service:0',
remote_unit='unit/0')
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
def test_db_changed_not_allowed(self, identity_changed, configs):
def test_db_changed_not_allowed(self, identity_changed, configs,
mock_peer_units, mock_log):
mock_peer_units.return_value = None
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
@@ -218,9 +232,13 @@ class KeystoneRelationTests(CharmTestCase):
self.assertFalse(self.ensure_initial_admin.called)
self.assertFalse(identity_changed.called)
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
def test_postgresql_db_changed(self, identity_changed, configs):
def test_postgresql_db_changed(self, identity_changed, configs,
mock_peer_units, mock_log):
mock_peer_units.return_value = None
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
@@ -233,6 +251,7 @@ class KeystoneRelationTests(CharmTestCase):
relation_id='identity-service:0',
remote_unit='unit/0')
@patch('keystone_utils.is_sync_master')
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@@ -243,7 +262,8 @@ class KeystoneRelationTests(CharmTestCase):
def test_config_changed_no_openstack_upgrade_leader(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user, cluster_joined,
ensure_permissions):
ensure_permissions, is_sync_master):
is_sync_master.return_value = False
self.openstack_upgrade_available.return_value = False
self.is_elected_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
@@ -263,9 +283,9 @@ class KeystoneRelationTests(CharmTestCase):
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0',
sync_certs=False)
remote_unit='unit/0')
@patch('keystone_utils.is_sync_master')
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@@ -276,7 +296,8 @@ class KeystoneRelationTests(CharmTestCase):
def test_config_changed_no_openstack_upgrade_not_leader(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user, cluster_joined,
ensure_permissions):
ensure_permissions, is_sync_master):
is_sync_master.return_value = False
self.openstack_upgrade_available.return_value = False
self.is_elected_leader.return_value = False
@@ -292,6 +313,7 @@ class KeystoneRelationTests(CharmTestCase):
self.assertFalse(self.ensure_initial_admin.called)
self.assertFalse(identity_changed.called)
@patch('keystone_utils.is_sync_master')
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@@ -302,7 +324,8 @@ class KeystoneRelationTests(CharmTestCase):
def test_config_changed_with_openstack_upgrade(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user, cluster_joined,
ensure_permissions):
ensure_permissions, is_sync_master):
is_sync_master.return_value = False
self.openstack_upgrade_available.return_value = True
self.is_elected_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
@@ -324,13 +347,32 @@ class KeystoneRelationTests(CharmTestCase):
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0',
sync_certs=False)
remote_unit='unit/0')
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.is_elected_leader')
@patch('keystone_utils.is_sync_master')
@patch('keystone_utils.update_hash_from_path')
@patch('keystone_utils.synchronize_ca')
@patch.object(hooks, 'hashlib')
@patch.object(hooks, 'send_notifications')
def test_identity_changed_leader(self, mock_send_notifications,
mock_hashlib):
mock_hashlib, mock_synchronize_ca,
mock_update_hash_from_path,
mock_is_sync_master,
mock_is_elected_leader,
mock_relation_ids, mock_peer_units,
mock_log):
mock_peer_units.return_value = None
mock_relation_ids.return_value = []
mock_is_sync_master.return_value = True
mock_is_elected_leader.return_value = True
# Ensure always returns diff
mock_update_hash_from_path.side_effect = \
lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))
self.is_elected_leader.return_value = True
hooks.identity_changed(
relation_id='identity-service:0',
@@ -338,9 +380,12 @@ class KeystoneRelationTests(CharmTestCase):
self.add_service_to_keystone.assert_called_with(
'identity-service:0',
'unit/0')
self.assertTrue(self.synchronize_ca.called)
self.assertTrue(mock_synchronize_ca.called)
def test_identity_changed_no_leader(self):
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
def test_identity_changed_no_leader(self, mock_peer_units, mock_log):
mock_peer_units.return_value = None
self.is_elected_leader.return_value = False
hooks.identity_changed(
relation_id='identity-service:0',
@@ -356,20 +401,34 @@ class KeystoneRelationTests(CharmTestCase):
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
@patch.object(hooks, 'is_pending_clustered')
@patch('keystone_utils.log')
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.is_elected_leader')
@patch('keystone_utils.is_sync_master')
@patch('keystone_utils.update_hash_from_path')
@patch('keystone_utils.synchronize_ca')
@patch.object(hooks, 'check_peer_actions')
@patch.object(unison, 'ssh_authorized_peers')
@patch.object(hooks, 'CONFIGS')
def test_cluster_changed(self, configs, ssh_authorized_peers,
check_peer_actions, is_pending_clustered):
is_pending_clustered.return_value = False
check_peer_actions,
mock_synchronize_ca, mock_update_hash_from_path,
mock_is_sync_master, mock_is_elected_leader,
mock_relation_ids, mock_log):
mock_relation_ids.return_value = []
mock_is_sync_master.return_value = True
mock_is_elected_leader.return_value = True
# Ensure always returns diff
mock_update_hash_from_path.side_effect = \
lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))
hooks.cluster_changed()
whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master']
self.peer_echo.assert_called_with(includes=whitelist)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertTrue(mock_synchronize_ca.called)
self.assertTrue(configs.write_all.called)
def test_ha_joined(self):
@@ -419,18 +478,32 @@ class KeystoneRelationTests(CharmTestCase):
}
self.relation_set.assert_called_with(**args)
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch('keystone_utils.synchronize_ca')
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_not_clustered_not_leader(self, configs):
def test_ha_relation_changed_not_clustered_not_leader(self, configs,
mock_synchronize_ca,
mock_peer_units,
mock_log):
mock_peer_units.return_value = None
self.relation_get.return_value = False
self.is_elected_leader.return_value = False
hooks.ha_changed()
self.assertTrue(configs.write_all.called)
self.assertTrue(mock_synchronize_ca.called)
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch('keystone_utils.synchronize_ca')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_clustered_leader(
self, configs, identity_changed):
def test_ha_relation_changed_clustered_leader(self, configs,
identity_changed,
mock_synchronize_ca,
mock_peer_units, mock_log):
mock_peer_units.return_value = None
self.relation_get.return_value = True
self.is_elected_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
@@ -443,11 +516,14 @@ class KeystoneRelationTests(CharmTestCase):
'keystone endpoint configuration')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0',
sync_certs=False)
remote_unit='unit/0')
self.assertTrue(mock_synchronize_ca.called)
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch.object(hooks, 'CONFIGS')
def test_configure_https_enable(self, configs):
def test_configure_https_enable(self, configs, mock_peer_units, mock_log):
mock_peer_units.return_value = None
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['https']
configs.write = MagicMock()
@@ -457,8 +533,11 @@ class KeystoneRelationTests(CharmTestCase):
cmd = ['a2ensite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd)
@patch('keystone_utils.log')
@patch('keystone_utils.peer_units')
@patch.object(hooks, 'CONFIGS')
def test_configure_https_disable(self, configs):
def test_configure_https_disable(self, configs, mock_peer_units, mock_log):
mock_peer_units.return_value = None
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['']
configs.write = MagicMock()
@@ -468,8 +547,24 @@ class KeystoneRelationTests(CharmTestCase):
cmd = ['a2dissite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd)
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.is_elected_leader')
@patch('keystone_utils.is_sync_master')
@patch('keystone_utils.update_hash_from_path')
@patch('keystone_utils.synchronize_ca')
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_leader(self, ssh_authorized_peers):
def test_upgrade_charm_leader(self, ssh_authorized_peers,
mock_synchronize_ca,
mock_update_hash_from_path,
mock_is_sync_master, mock_is_elected_leader,
mock_relation_ids):
mock_relation_ids.return_value = []
mock_is_sync_master.return_value = True
mock_is_elected_leader.return_value = True
# Ensure always returns diff
mock_update_hash_from_path.side_effect = \
lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))
self.is_elected_leader.return_value = True
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
@@ -477,14 +572,31 @@ class KeystoneRelationTests(CharmTestCase):
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertTrue(mock_synchronize_ca.called)
self.log.assert_called_with(
'Cluster leader - ensuring endpoint configuration'
' is up to date')
self.assertTrue(self.ensure_initial_admin.called)
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.is_elected_leader')
@patch('keystone_utils.is_sync_master')
@patch('keystone_utils.update_hash_from_path')
@patch('keystone_utils.synchronize_ca')
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_not_leader(self, ssh_authorized_peers):
def test_upgrade_charm_not_leader(self, ssh_authorized_peers,
mock_synchronize_ca,
mock_update_hash_from_path,
mock_is_sync_master,
mock_is_elected_leader,
mock_relation_ids):
mock_relation_ids.return_value = []
mock_is_sync_master.return_value = True
mock_is_elected_leader.return_value = True
# Ensure always returns diff
mock_update_hash_from_path.side_effect = \
lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))
self.is_elected_leader.return_value = False
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
@@ -492,6 +604,6 @@ class KeystoneRelationTests(CharmTestCase):
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertTrue(mock_synchronize_ca.called)
self.assertFalse(self.log.called)
self.assertFalse(self.ensure_initial_admin.called)