From 14f39ff1336404220a9544b58b95c1d39c59892c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 5 Jan 2015 17:49:38 +0000 Subject: [PATCH 01/13] [hopem,r=] Fixes ssl cert sycnhronisation across peers Closes-Bug: 1317782 --- hooks/charmhelpers/contrib/unison/__init__.py | 35 +- hooks/keystone_context.py | 51 ++- hooks/keystone_hooks.py | 79 +++-- hooks/keystone_ssl.py | 3 + hooks/keystone_utils.py | 328 +++++++++++++++--- unit_tests/test_keystone_hooks.py | 50 +-- unit_tests/test_keystone_utils.py | 4 +- 7 files changed, 440 insertions(+), 110 deletions(-) diff --git a/hooks/charmhelpers/contrib/unison/__init__.py b/hooks/charmhelpers/contrib/unison/__init__.py index f903ac03..261f7cd2 100644 --- a/hooks/charmhelpers/contrib/unison/__init__.py +++ b/hooks/charmhelpers/contrib/unison/__init__.py @@ -228,7 +228,12 @@ def collect_authed_hosts(peer_interface): return hosts -def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None): +def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None, + fatal=False): + """Sync path to an specific peer host + + Propagates exception if operation fails and fatal=True. + """ cmd = cmd or copy(BASE_CMD) if not verbose: cmd.append('-silent') @@ -245,20 +250,30 @@ def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None): run_as_user(user, cmd, gid) except: log('Error syncing remote files') + if fatal: + raise -def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None): - '''Sync paths to an specific host''' +def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None, + fatal=False): + """Sync paths to an specific peer host + + Propagates exception if any operation fails and fatal=True. 
+ """ if paths: for p in paths: - sync_path_to_host(p, host, user, verbose, cmd, gid) + sync_path_to_host(p, host, user, verbose, cmd, gid, fatal) -def sync_to_peers(peer_interface, user, paths=None, - verbose=False, cmd=None, gid=None): - '''Sync all hosts to an specific path''' - '''The type of group is integer, it allows user has permissions to ''' - '''operate a directory have a different group id with the user id.''' +def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None, + gid=None, fatal=False): + """Sync all hosts to an specific path + + The type of group is integer, it allows user has permissions to + operate a directory have a different group id with the user id. + + Propagates exception if any operation fails and fatal=True. + """ if paths: for host in collect_authed_hosts(peer_interface): - sync_to_peer(host, user, paths, verbose, cmd, gid) + sync_to_peer(host, user, paths, verbose, cmd, gid, fatal) diff --git a/hooks/keystone_context.py b/hooks/keystone_context.py index c61e70a1..cfb7eb72 100644 --- a/hooks/keystone_context.py +++ b/hooks/keystone_context.py @@ -1,3 +1,5 @@ +import os + from charmhelpers.core.hookenv import config from charmhelpers.core.host import mkdir, write_file @@ -9,9 +11,12 @@ from charmhelpers.contrib.hahelpers.cluster import ( determine_api_port ) -from charmhelpers.contrib.hahelpers.apache import install_ca_cert +from charmhelpers.core.hookenv import ( + log, + INFO, +) -import os +from charmhelpers.contrib.hahelpers.apache import install_ca_cert CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -29,20 +34,52 @@ class ApacheSSLContext(context.ApacheSSLContext): return super(ApacheSSLContext, self).__call__() def configure_cert(self, cn): - from keystone_utils import SSH_USER, get_ca + from keystone_utils import ( + SSH_USER, + get_ca, + is_ssl_cert_master, + ensure_permissions, + ) + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) - mkdir(path=ssl_dir) + 
perms = 0o755 + mkdir(path=ssl_dir, owner=SSH_USER, group='keystone', perms=perms) + # Ensure accessible by keystone ssh user and group (for sync) + ensure_permissions(ssl_dir, user=SSH_USER, group='keystone', + perms=perms) + + if not is_ssl_cert_master(): + log("Not leader or cert master so skipping apache cert config", + level=INFO) + return + + log("Creating apache ssl certs in %s" % (ssl_dir), level=INFO) + ca = get_ca(user=SSH_USER) cert, key = ca.get_cert_and_key(common_name=cn) write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)), - content=cert) + content=cert, owner=SSH_USER, group='keystone', perms=0o644) write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)), - content=key) + content=key, owner=SSH_USER, group='keystone', perms=0o644) def configure_ca(self): - from keystone_utils import SSH_USER, get_ca + from keystone_utils import ( + SSH_USER, + get_ca, + is_ssl_cert_master, + ensure_permissions, + ) + + if not is_ssl_cert_master(): + log("Not leader or cert master so skipping apache ca config", + level=INFO) + return + ca = get_ca(user=SSH_USER) install_ca_cert(ca.get_ca_bundle()) + # Ensure accessible by keystone ssh user and group (unison) + ensure_permissions(CA_CERT_PATH, user=SSH_USER, group='keystone', + perms=0o0644) def canonical_names(self): addresses = self.get_network_addresses() diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 27b987e9..4ab442a9 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -1,7 +1,8 @@ #!/usr/bin/python - import hashlib import os +import re +import stat import sys import time @@ -16,6 +17,7 @@ from charmhelpers.core.hookenv import ( is_relation_made, log, local_unit, + WARNING, ERROR, relation_get, relation_ids, @@ -57,11 +59,13 @@ from keystone_utils import ( STORED_PASSWD, setup_ipv6, send_notifications, + check_peer_actions, + CA_CERT_PATH, + ensure_permissions, ) from charmhelpers.contrib.hahelpers.cluster import ( - eligible_leader, - is_leader, + 
is_elected_leader, get_hacluster_config, ) @@ -109,10 +113,19 @@ def config_changed(): check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/']) + # Ensure unison can write to certs dir. + # FIXME: need to a better way around this e.g. move cert to it's own dir + # and give that unison permissions. + path = os.path.dirname(CA_CERT_PATH) + perms = int(oct(stat.S_IMODE(os.stat(path).st_mode) | + (stat.S_IWGRP | stat.S_IXGRP)), base=8) + ensure_permissions(path, group='keystone', perms=perms) + save_script_rc() configure_https() CONFIGS.write_all() - if eligible_leader(CLUSTER_RES): + + if is_elected_leader(CLUSTER_RES): migrate_database() ensure_initial_admin(config) log('Firing identity_changed hook for all related services.') @@ -121,7 +134,9 @@ def config_changed(): for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): identity_changed(relation_id=r_id, - remote_unit=unit) + remote_unit=unit, sync_certs=False) + + synchronize_ca() [cluster_joined(rid) for rid in relation_ids('cluster')] @@ -163,7 +178,7 @@ def db_changed(): log('shared-db relation incomplete. Peer not ready?') else: CONFIGS.write(KEYSTONE_CONF) - if eligible_leader(CLUSTER_RES): + if is_elected_leader(CLUSTER_RES): # Bugs 1353135 & 1187508. Dbs can appear to be ready before the # units acl entry has been added. So, if the db supports passing # a list of permitted units then check if we're in the list. @@ -188,7 +203,7 @@ def pgsql_db_changed(): log('pgsql-db relation incomplete. 
Peer not ready?') else: CONFIGS.write(KEYSTONE_CONF) - if eligible_leader(CLUSTER_RES): + if is_elected_leader(CLUSTER_RES): migrate_database() ensure_initial_admin(config) # Ensure any existing service entries are updated in the @@ -199,11 +214,27 @@ def pgsql_db_changed(): @hooks.hook('identity-service-relation-changed') -def identity_changed(relation_id=None, remote_unit=None): +def identity_changed(relation_id=None, remote_unit=None, sync_certs=True): notifications = {} - if eligible_leader(CLUSTER_RES): - add_service_to_keystone(relation_id, remote_unit) - synchronize_ca() + if is_elected_leader(CLUSTER_RES): + # Catch database not configured error and defer until db ready + from keystoneclient.apiclient.exceptions import InternalServerError + try: + add_service_to_keystone(relation_id, remote_unit) + except InternalServerError as exc: + key = re.compile("'keystone\..+' doesn't exist") + if re.search(key, exc.message): + log("Keystone database not yet ready (InternalServerError " + "raised) - deferring until *-db relation completes.", + level=WARNING) + return + + log("Unexpected exception occurred", level=ERROR) + raise + + CONFIGS.write_all() + if sync_certs: + synchronize_ca() settings = relation_get(rid=relation_id, unit=remote_unit) service = settings.get('service', None) @@ -257,18 +288,22 @@ def cluster_joined(relation_id=None): 'cluster-relation-departed') @restart_on_change(restart_map(), stopstart=True) def cluster_changed(): + check_peer_actions() + # NOTE(jamespage) re-echo passwords for peer storage - peer_echo(includes=['_passwd', 'identity-service:']) + echo_whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master'] + peer_echo(includes=echo_whitelist) unison.ssh_authorized_peers(user=SSH_USER, group='keystone', peer_interface='cluster', ensure_local_user=True) - synchronize_ca() CONFIGS.write_all() for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): - identity_changed(relation_id=r_id, - remote_unit=unit) + 
identity_changed(relation_id=r_id, remote_unit=unit, + sync_certs=False) + + synchronize_ca() @hooks.hook('ha-relation-joined') @@ -325,14 +360,16 @@ def ha_joined(): def ha_changed(): clustered = relation_get('clustered') CONFIGS.write_all() - if (clustered is not None and - is_leader(CLUSTER_RES)): + if clustered is not None and is_elected_leader(CLUSTER_RES): ensure_initial_admin(config) log('Cluster configured, notifying other services and updating ' 'keystone endpoint configuration') for rid in relation_ids('identity-service'): for unit in related_units(rid): - identity_changed(relation_id=rid, remote_unit=unit) + identity_changed(relation_id=rid, remote_unit=unit, + sync_certs=False) + + synchronize_ca() @hooks.hook('identity-admin-relation-changed') @@ -375,8 +412,7 @@ def upgrade_charm(): group='keystone', peer_interface='cluster', ensure_local_user=True) - synchronize_ca() - if eligible_leader(CLUSTER_RES): + if is_elected_leader(CLUSTER_RES): log('Cluster leader - ensuring endpoint configuration' ' is up to date') time.sleep(10) @@ -385,8 +421,9 @@ def upgrade_charm(): for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): identity_changed(relation_id=r_id, - remote_unit=unit) + remote_unit=unit, sync_certs=False) CONFIGS.write_all() + synchronize_ca() def main(): diff --git a/hooks/keystone_ssl.py b/hooks/keystone_ssl.py index 45e0029d..6b2f4a85 100644 --- a/hooks/keystone_ssl.py +++ b/hooks/keystone_ssl.py @@ -101,6 +101,9 @@ keyUsage = digitalSignature, keyEncipherment, keyAgreement extendedKeyUsage = serverAuth, clientAuth """ +# Instance can be appended to this list to represent a singleton +CA_SINGLETON = [] + def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT): print 'Ensuring certificate authority exists at %s.' 
% ca_dir diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index 08bd3de7..ff14f941 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -1,8 +1,12 @@ #!/usr/bin/python +import glob +import grp import subprocess import os +import pwd import uuid import urlparse +import shutil import time from base64 import b64encode @@ -10,11 +14,11 @@ from collections import OrderedDict from copy import deepcopy from charmhelpers.contrib.hahelpers.cluster import( - eligible_leader, + is_elected_leader, determine_api_port, https, is_clustered, - is_elected_leader, + peer_units, ) from charmhelpers.contrib.openstack import context, templating @@ -37,8 +41,17 @@ from charmhelpers.contrib.openstack.utils import ( os_release, save_script_rc as _save_script_rc) +from charmhelpers.core.host import ( + mkdir, + write_file, +) + import charmhelpers.contrib.unison as unison +from charmhelpers.core.decorators import ( + retry_on_exception, +) + from charmhelpers.core.hookenv import ( config, log, @@ -46,8 +59,10 @@ from charmhelpers.core.hookenv import ( relation_get, relation_set, relation_ids, + unit_get, DEBUG, INFO, + WARNING, ) from charmhelpers.fetch import ( @@ -60,6 +75,7 @@ from charmhelpers.fetch import ( from charmhelpers.core.host import ( service_stop, service_start, + service_restart, pwgen, lsb_release ) @@ -108,6 +124,8 @@ HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend' APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf' +APACHE_SSL_DIR = '/etc/apache2/ssl/keystone' +SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/' SSL_DIR = '/var/lib/keystone/juju_ssl/' SSL_CA_NAME = 'Ubuntu Cloud' CLUSTER_RES = 'grp_ks_vips' @@ -197,6 +215,13 @@ valid_services = { } +def str_is_true(value): + if value and value.lower() in ['true', 'yes']: + return True + + return False + + def resource_map(): ''' Dynamically generate a map of resources that will be managed for a 
single @@ -272,7 +297,7 @@ def do_openstack_upgrade(configs): configs.set_release(openstack_release=new_os_rel) configs.write_all() - if eligible_leader(CLUSTER_RES): + if is_elected_leader(CLUSTER_RES): migrate_database() @@ -474,44 +499,57 @@ def grant_role(user, role, tenant): def ensure_initial_admin(config): - """ Ensures the minimum admin stuff exists in whatever database we're + # Allow retry on fail since leader may not be ready yet. + # NOTE(hopem): ks client may not be installed at module import time so we + # use this wrapped approach instead. + from keystoneclient.apiclient.exceptions import InternalServerError + + @retry_on_exception(3, base_delay=3, exc_type=InternalServerError) + def _ensure_initial_admin(config): + """Ensures the minimum admin stuff exists in whatever database we're using. + This and the helper functions it calls are meant to be idempotent and run during install as well as during db-changed. This will maintain the admin tenant, user, role, service entry and endpoint across every datastore we might use. + TODO: Possibly migrate data from one backend to another after it changes? 
- """ - create_tenant("admin") - create_tenant(config("service-tenant")) + """ + create_tenant("admin") + create_tenant(config("service-tenant")) - passwd = "" - if config("admin-password") != "None": - passwd = config("admin-password") - elif os.path.isfile(STORED_PASSWD): - log("Loading stored passwd from %s" % STORED_PASSWD) - passwd = open(STORED_PASSWD, 'r').readline().strip('\n') - if passwd == "": - log("Generating new passwd for user: %s" % - config("admin-user")) - cmd = ['pwgen', '-c', '16', '1'] - passwd = str(subprocess.check_output(cmd)).strip() - open(STORED_PASSWD, 'w+').writelines("%s\n" % passwd) - # User is managed by ldap backend when using ldap identity - if not (config('identity-backend') == 'ldap' and config('ldap-readonly')): - create_user(config('admin-user'), passwd, tenant='admin') - update_user_password(config('admin-user'), passwd) - create_role(config('admin-role'), config('admin-user'), 'admin') - create_service_entry("keystone", "identity", "Keystone Identity Service") + passwd = "" + if config("admin-password") != "None": + passwd = config("admin-password") + elif os.path.isfile(STORED_PASSWD): + log("Loading stored passwd from %s" % STORED_PASSWD) + passwd = open(STORED_PASSWD, 'r').readline().strip('\n') + if passwd == "": + log("Generating new passwd for user: %s" % + config("admin-user")) + cmd = ['pwgen', '-c', '16', '1'] + passwd = str(subprocess.check_output(cmd)).strip() + open(STORED_PASSWD, 'w+').writelines("%s\n" % passwd) + # User is managed by ldap backend when using ldap identity + if (not (config('identity-backend') == 'ldap' and + config('ldap-readonly'))): + create_user(config('admin-user'), passwd, tenant='admin') + update_user_password(config('admin-user'), passwd) + create_role(config('admin-role'), config('admin-user'), 'admin') + create_service_entry("keystone", "identity", + "Keystone Identity Service") - for region in config('region').split(): - create_keystone_endpoint(public_ip=resolve_address(PUBLIC), - 
service_port=config("service-port"), - internal_ip=resolve_address(INTERNAL), - admin_ip=resolve_address(ADMIN), - auth_port=config("admin-port"), - region=region) + for region in config('region').split(): + create_keystone_endpoint(public_ip=resolve_address(PUBLIC), + service_port=config("service-port"), + internal_ip=resolve_address(INTERNAL), + admin_ip=resolve_address(ADMIN), + auth_port=config("admin-port"), + region=region) + + return _ensure_initial_admin(config) def endpoint_url(ip, port): @@ -579,20 +617,201 @@ def get_service_password(service_username): return passwd -def synchronize_ca(): - ''' - Broadcast service credentials to peers or consume those that have been - broadcasted by peer, depending on hook context. - ''' - if not eligible_leader(CLUSTER_RES): - return - log('Synchronizing CA to all peers.') - if is_clustered(): - if config('https-service-endpoints') in ['True', 'true']: - unison.sync_to_peers(peer_interface='cluster', - paths=[SSL_DIR], user=SSH_USER, verbose=True) +def ensure_permissions(path, user=None, group=None, perms=None): + """Set chownand chmod for path -CA = [] + Note that -1 for uid or gid result in no change. + """ + if user: + uid = pwd.getpwnam(user).pw_uid + else: + uid = -1 + + if group: + gid = grp.getgrnam(group).gr_gid + else: + gid = -1 + + os.chown(path, uid, gid) + + if perms: + os.chmod(path, perms) + + +def check_peer_actions(): + """Honour service action requests from sync master. + + Check for service action request flags, perform the action then delete the + flag. 
+ """ + restart = relation_get(attribute='restart-services-trigger') + if restart and os.path.isdir(SYNC_FLAGS_DIR): + for flagfile in glob.glob(os.path.join(SYNC_FLAGS_DIR, '*')): + flag = os.path.basename(flagfile) + service = flag.partition('.')[0] + action = flag.partition('.')[2] + + if action == 'restart': + log("Running action='%s' on service '%s'" % + (action, service), level=DEBUG) + service_restart(service) + elif action == 'start': + log("Running action='%s' on service '%s'" % + (action, service), level=DEBUG) + service_start(service) + elif action == 'stop': + log("Running action='%s' on service '%s'" % + (action, service), level=DEBUG) + service_stop(service) + elif flag == 'update-ca-certificates': + log("Running update-ca-certificates", level=DEBUG) + subprocess.check_call(['update-ca-certificates']) + else: + log("Unknown action flag=%s" % (flag), level=WARNING) + + os.remove(flagfile) + + +def create_peer_service_actions(action, services): + """Mark remote services for action. + + Default action is restart. These action will be picked up by peer units + e.g. we may need to restart services on peer units after certs have been + synced. 
+ """ + for service in services: + flagfile = os.path.join(SYNC_FLAGS_DIR, '%s.%s' % + (service.strip(), action)) + log("Creating action %s" % (flagfile), level=DEBUG) + write_file(flagfile, content='', owner=SSH_USER, group='keystone', + perms=0o644) + + +def create_service_action(action): + flagfile = os.path.join(SYNC_FLAGS_DIR, action) + log("Creating action %s" % (flagfile), level=DEBUG) + write_file(flagfile, content='', owner=SSH_USER, group='keystone', + perms=0o644) + + +def is_ssl_cert_master(): + """Return True if this unit is ssl cert master.""" + master = None + for rid in relation_ids('cluster'): + master = relation_get(attribute='ssl-cert-master', rid=rid, + unit=local_unit()) + + if master and master == unit_get('private-address'): + return True + + return False + + +@retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError) +def unison_sync(paths_to_sync): + """Do unison sync and retry a few times if it fails since peers may not be + ready for sync. + """ + log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)), + level=INFO) + keystone_gid = grp.getgrnam('keystone').gr_gid + unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync, + user=SSH_USER, verbose=True, gid=keystone_gid, + fatal=True) + + +def synchronize_ca(fatal=True): + """Broadcast service credentials to peers. + + By default a failure to sync is fatal and will result in a raised + exception. + + This function uses a relation setting 'ssl-cert-master' to get some + leader stickiness while synchronisation is being carried out. This ensures + that the last host to create and broadcast cetificates has the option to + complete actions before electing the new leader as sync master. + """ + paths_to_sync = [SYNC_FLAGS_DIR] + + if not peer_units(): + log("Not syncing certs since there are no peer units.", level=INFO) + return + + # If no ssl master elected and we are cluster leader, elect this unit. 
+ if is_elected_leader(CLUSTER_RES): + master = relation_get(attribute='ssl-cert-master') + if not master or master == 'unknown': + log("Electing this unit as ssl-cert-master", level=DEBUG) + for rid in relation_ids('cluster'): + relation_set(relation_id=rid, + relation_settings={'ssl-cert-master': + unit_get('private-address'), + 'trigger': str(uuid.uuid4())}) + # Return now and wait for echo before continuing. + return + + if not is_ssl_cert_master(): + log("Not ssl cert master - skipping sync", level=INFO) + return + + if str_is_true(config('https-service-endpoints')): + log("Syncing all endpoint certs since https-service-endpoints=True", + level=DEBUG) + paths_to_sync.append(SSL_DIR) + paths_to_sync.append(APACHE_SSL_DIR) + paths_to_sync.append(CA_CERT_PATH) + elif str_is_true(config('use-https')): + log("Syncing keystone-endpoint certs since use-https=True", + level=DEBUG) + paths_to_sync.append(APACHE_SSL_DIR) + paths_to_sync.append(CA_CERT_PATH) + + if not paths_to_sync: + log("Nothing to sync - skipping", level=DEBUG) + return + + # If we are sync master proceed even if we are not leader since we are + # most likely to have up-to-date certs. If not leader we will re-elect once + # synced. This is done to avoid being affected by leader changing before + # all units have synced certs. + + # Clear any existing flags + if os.path.isdir(SYNC_FLAGS_DIR): + shutil.rmtree(SYNC_FLAGS_DIR) + + mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775) + + # We need to restart peer apache services to ensure they have picked up + # new ssl keys. 
+ create_peer_service_actions('restart', ['apache2']) + create_service_action('update-ca-certificates') + + try: + unison_sync(paths_to_sync) + except: + if fatal: + raise + else: + log("Sync failed but fatal=False", level=INFO) + return + + trigger = str(uuid.uuid4()) + log("Sending restart-services-trigger=%s to all peers" % (trigger), + level=DEBUG) + settings = {'restart-services-trigger': trigger} + + # Cleanup + shutil.rmtree(SYNC_FLAGS_DIR) + mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775) + + # If we are the sync master but no longer leader then re-elect master. + if not is_elected_leader(CLUSTER_RES): + log("Re-electing ssl cert master.", level=INFO) + settings['ssl-cert-master'] = 'unknown' + + log("Sync complete - sending peer info", level=DEBUG) + for rid in relation_ids('cluster'): + relation_set(relation_id=rid, **settings) def get_ca(user='keystone', group='keystone'): @@ -600,22 +819,31 @@ def get_ca(user='keystone', group='keystone'): Initialize a new CA object if one hasn't already been loaded. This will create a new CA or load an existing one. """ - if not CA: + if not ssl.CA_SINGLETON: if not os.path.isdir(SSL_DIR): os.mkdir(SSL_DIR) + d_name = '_'.join(SSL_CA_NAME.lower().split(' ')) ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group, ca_dir=os.path.join(SSL_DIR, '%s_intermediate_ca' % d_name), root_ca_dir=os.path.join(SSL_DIR, '%s_root_ca' % d_name)) + # SSL_DIR is synchronized via all peers over unison+ssh, need # to ensure permissions. subprocess.check_output(['chown', '-R', '%s.%s' % (user, group), '%s' % SSL_DIR]) subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR]) - CA.append(ca) - return CA[0] + + # Tag this host as the ssl cert master. 
+ if is_clustered() or peer_units(): + peer_store(key='ssl-cert-master', + value=unit_get('private-address')) + + ssl.CA_SINGLETON.append(ca) + + return ssl.CA_SINGLETON[0] def relation_list(rid): @@ -657,7 +885,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): relation_data["auth_port"] = config('admin-port') relation_data["service_port"] = config('service-port') relation_data["region"] = config('region') - if config('https-service-endpoints') in ['True', 'true']: + if str_is_true(config('https-service-endpoints')): # Pass CA cert as client will need it to # verify https connections ca = get_ca(user=SSH_USER) @@ -796,7 +1024,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): relation_data["auth_protocol"] = "http" relation_data["service_protocol"] = "http" # generate or get a new cert/key for service if set to manage certs. - if config('https-service-endpoints') in ['True', 'true']: + if str_is_true(config('https-service-endpoints')): ca = get_ca(user=SSH_USER) # NOTE(jamespage) may have multiple cns to deal with to iterate https_cns = set(https_cns) diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index b8cdee0d..48a60e22 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -43,8 +43,7 @@ TO_PATCH = [ # charmhelpers.contrib.openstack.utils 'configure_installation_source', # charmhelpers.contrib.hahelpers.cluster_utils - 'is_leader', - 'eligible_leader', + 'is_elected_leader', 'get_hacluster_config', # keystone_utils 'restart_map', @@ -234,6 +233,7 @@ class KeystoneRelationTests(CharmTestCase): relation_id='identity-service:0', remote_unit='unit/0') + @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @patch.object(unison, 'get_homedir') @@ -242,9 +242,10 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'configure_https') def 
test_config_changed_no_openstack_upgrade_leader( self, configure_https, identity_changed, - configs, get_homedir, ensure_user, cluster_joined): + configs, get_homedir, ensure_user, cluster_joined, + ensure_permissions): self.openstack_upgrade_available.return_value = False - self.eligible_leader.return_value = True + self.is_elected_leader.return_value = True self.relation_ids.return_value = ['identity-service:0'] self.relation_list.return_value = ['unit/0'] @@ -262,8 +263,10 @@ class KeystoneRelationTests(CharmTestCase): 'Firing identity_changed hook for all related services.') identity_changed.assert_called_with( relation_id='identity-service:0', - remote_unit='unit/0') + remote_unit='unit/0', + sync_certs=False) + @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @patch.object(unison, 'get_homedir') @@ -272,9 +275,10 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'configure_https') def test_config_changed_no_openstack_upgrade_not_leader( self, configure_https, identity_changed, - configs, get_homedir, ensure_user, cluster_joined): + configs, get_homedir, ensure_user, cluster_joined, + ensure_permissions): self.openstack_upgrade_available.return_value = False - self.eligible_leader.return_value = False + self.is_elected_leader.return_value = False hooks.config_changed() ensure_user.assert_called_with(user=self.ssh_user, group='keystone') @@ -288,6 +292,7 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(self.ensure_initial_admin.called) self.assertFalse(identity_changed.called) + @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @patch.object(unison, 'get_homedir') @@ -296,9 +301,10 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'configure_https') def test_config_changed_with_openstack_upgrade( self, configure_https, identity_changed, - configs, get_homedir, ensure_user, 
cluster_joined): + configs, get_homedir, ensure_user, cluster_joined, + ensure_permissions): self.openstack_upgrade_available.return_value = True - self.eligible_leader.return_value = True + self.is_elected_leader.return_value = True self.relation_ids.return_value = ['identity-service:0'] self.relation_list.return_value = ['unit/0'] @@ -318,13 +324,14 @@ class KeystoneRelationTests(CharmTestCase): 'Firing identity_changed hook for all related services.') identity_changed.assert_called_with( relation_id='identity-service:0', - remote_unit='unit/0') + remote_unit='unit/0', + sync_certs=False) @patch.object(hooks, 'hashlib') @patch.object(hooks, 'send_notifications') def test_identity_changed_leader(self, mock_send_notifications, mock_hashlib): - self.eligible_leader.return_value = True + self.is_elected_leader.return_value = True hooks.identity_changed( relation_id='identity-service:0', remote_unit='unit/0') @@ -334,7 +341,7 @@ class KeystoneRelationTests(CharmTestCase): self.assertTrue(self.synchronize_ca.called) def test_identity_changed_no_leader(self): - self.eligible_leader.return_value = False + self.is_elected_leader.return_value = False hooks.identity_changed( relation_id='identity-service:0', remote_unit='unit/0') @@ -349,12 +356,14 @@ class KeystoneRelationTests(CharmTestCase): user=self.ssh_user, group='juju_keystone', peer_interface='cluster', ensure_local_user=True) + @patch.object(hooks, 'check_peer_actions') @patch.object(unison, 'ssh_authorized_peers') @patch.object(hooks, 'CONFIGS') - def test_cluster_changed(self, configs, ssh_authorized_peers): + def test_cluster_changed(self, configs, ssh_authorized_peers, + check_peer_actions): hooks.cluster_changed() - self.peer_echo.assert_called_with(includes=['_passwd', - 'identity-service:']) + whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master'] + self.peer_echo.assert_called_with(includes=whitelist) ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='keystone', 
peer_interface='cluster', ensure_local_user=True) @@ -411,7 +420,7 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_not_clustered_not_leader(self, configs): self.relation_get.return_value = False - self.is_leader.return_value = False + self.is_elected_leader.return_value = False hooks.ha_changed() self.assertTrue(configs.write_all.called) @@ -421,7 +430,7 @@ class KeystoneRelationTests(CharmTestCase): def test_ha_relation_changed_clustered_leader( self, configs, identity_changed): self.relation_get.return_value = True - self.is_leader.return_value = True + self.is_elected_leader.return_value = True self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -432,7 +441,8 @@ class KeystoneRelationTests(CharmTestCase): 'keystone endpoint configuration') identity_changed.assert_called_with( relation_id='identity-service:0', - remote_unit='unit/0') + remote_unit='unit/0', + sync_certs=False) @patch.object(hooks, 'CONFIGS') def test_configure_https_enable(self, configs): @@ -458,7 +468,7 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(unison, 'ssh_authorized_peers') def test_upgrade_charm_leader(self, ssh_authorized_peers): - self.eligible_leader.return_value = True + self.is_elected_leader.return_value = True self.filter_installed_packages.return_value = [] hooks.upgrade_charm() self.assertTrue(self.apt_install.called) @@ -473,7 +483,7 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(unison, 'ssh_authorized_peers') def test_upgrade_charm_not_leader(self, ssh_authorized_peers): - self.eligible_leader.return_value = False + self.is_elected_leader.return_value = False self.filter_installed_packages.return_value = [] hooks.upgrade_charm() self.assertTrue(self.apt_install.called) diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py index f504b21e..53a874cd 100644 --- a/unit_tests/test_keystone_utils.py +++ 
b/unit_tests/test_keystone_utils.py @@ -26,7 +26,7 @@ TO_PATCH = [ 'get_os_codename_install_source', 'grant_role', 'configure_installation_source', - 'eligible_leader', + 'is_elected_leader', 'https', 'is_clustered', 'peer_store_and_set', @@ -113,7 +113,7 @@ class TestKeystoneUtils(CharmTestCase): self, migrate_database, determine_packages, configs): self.test_config.set('openstack-origin', 'precise') determine_packages.return_value = [] - self.eligible_leader.return_value = True + self.is_elected_leader.return_value = True utils.do_openstack_upgrade(configs) From ee78c8bb554f3eda43b6f14dd44d1c78a9a0bd6c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 5 Jan 2015 21:57:25 +0000 Subject: [PATCH 02/13] defer some action to ha rel hook if waiting on 'clustered' --- hooks/keystone_hooks.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 4ab442a9..8570c253 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -17,6 +17,7 @@ from charmhelpers.core.hookenv import ( is_relation_made, log, local_unit, + INFO, WARNING, ERROR, relation_get, @@ -284,6 +285,19 @@ def cluster_joined(relation_id=None): relation_settings={'private-address': private_addr}) +def is_pending_clustered(): + """If we have HA relations but are not yet 'clustered' return True.""" + pending = False + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + if relation_get('clustered', rid=r_id, unit=unit): + pending = False + else: + pending = True + + return pending + + @hooks.hook('cluster-relation-changed', 'cluster-relation-departed') @restart_on_change(restart_map(), stopstart=True) @@ -298,6 +312,14 @@ def cluster_changed(): peer_interface='cluster', ensure_local_user=True) CONFIGS.write_all() + + # If we have a pending cluster formation, defer following actions to the ha + # relation hook instead. 
+ if is_pending_clustered(): + log("Waiting for ha to be 'clustered' - deferring identity-updates " + "and cert sync to ha relation", level=INFO) + return + for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): identity_changed(relation_id=r_id, remote_unit=unit, @@ -364,6 +386,7 @@ def ha_changed(): ensure_initial_admin(config) log('Cluster configured, notifying other services and updating ' 'keystone endpoint configuration') + for rid in relation_ids('identity-service'): for unit in related_units(rid): identity_changed(relation_id=rid, remote_unit=unit, From 61b07fc623eedb286b18c5eaaadffa9ea0024e55 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 8 Jan 2015 14:56:31 +0000 Subject: [PATCH 03/13] more cluster relation noise reduction --- hooks/keystone_hooks.py | 16 ++-------------- hooks/keystone_utils.py | 10 ++++++++++ unit_tests/test_keystone_hooks.py | 4 +++- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 8570c253..652679fe 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -63,6 +63,7 @@ from keystone_utils import ( check_peer_actions, CA_CERT_PATH, ensure_permissions, + is_pending_clustered, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -285,19 +286,6 @@ def cluster_joined(relation_id=None): relation_settings={'private-address': private_addr}) -def is_pending_clustered(): - """If we have HA relations but are not yet 'clustered' return True.""" - pending = False - for r_id in (relation_ids('ha') or []): - for unit in (relation_list(r_id) or []): - if relation_get('clustered', rid=r_id, unit=unit): - pending = False - else: - pending = True - - return pending - - @hooks.hook('cluster-relation-changed', 'cluster-relation-departed') @restart_on_change(restart_map(), stopstart=True) @@ -306,7 +294,6 @@ def cluster_changed(): # NOTE(jamespage) re-echo passwords for peer storage echo_whitelist = ['_passwd', 
'identity-service:', 'ssl-cert-master'] - peer_echo(includes=echo_whitelist) unison.ssh_authorized_peers(user=SSH_USER, group='keystone', peer_interface='cluster', @@ -326,6 +313,7 @@ def cluster_changed(): sync_certs=False) synchronize_ca() + peer_echo(includes=echo_whitelist) @hooks.hook('ha-relation-joined') diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index ff14f941..6ee620c9 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -1149,3 +1149,13 @@ def send_notifications(data, force=False): level=DEBUG) for rid in rel_ids: relation_set(relation_id=rid, relation_settings=_notifications) + + +def is_pending_clustered(): + """If we have HA relations but are not yet 'clustered' return True.""" + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + if not relation_get('clustered', rid=r_id, unit=unit): + return True + + return False diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index 48a60e22..3829f648 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -356,11 +356,13 @@ class KeystoneRelationTests(CharmTestCase): user=self.ssh_user, group='juju_keystone', peer_interface='cluster', ensure_local_user=True) + @patch.object(hooks, 'is_pending_clustered') @patch.object(hooks, 'check_peer_actions') @patch.object(unison, 'ssh_authorized_peers') @patch.object(hooks, 'CONFIGS') def test_cluster_changed(self, configs, ssh_authorized_peers, - check_peer_actions): + check_peer_actions, is_pending_clustered): + is_pending_clustered.return_value = False hooks.cluster_changed() whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master'] self.peer_echo.assert_called_with(includes=whitelist) From a760082802b75d0577e23c490e9c584175a65301 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Sat, 10 Jan 2015 14:56:22 +0000 Subject: [PATCH 04/13] Fixed a few race issues and switched to using decorators --- hooks/keystone_hooks.py | 134 
++++++++++++---------- hooks/keystone_utils.py | 180 +++++++++++++++++++++--------- unit_tests/test_keystone_hooks.py | 172 +++++++++++++++++++++++----- 3 files changed, 346 insertions(+), 140 deletions(-) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 652679fe..21a476ac 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -17,6 +17,7 @@ from charmhelpers.core.hookenv import ( is_relation_made, log, local_unit, + DEBUG, INFO, WARNING, ERROR, @@ -24,6 +25,7 @@ from charmhelpers.core.hookenv import ( relation_ids, relation_set, related_units, + remote_unit, unit_get, ) @@ -50,9 +52,8 @@ from keystone_utils import ( ensure_initial_admin, migrate_database, save_script_rc, - synchronize_ca, + synchronize_ca_if_changed, register_configs, - relation_list, restart_map, CLUSTER_RES, KEYSTONE_CONF, @@ -63,12 +64,13 @@ from keystone_utils import ( check_peer_actions, CA_CERT_PATH, ensure_permissions, - is_pending_clustered, + is_ssl_cert_master, ) from charmhelpers.contrib.hahelpers.cluster import ( is_elected_leader, get_hacluster_config, + peer_units, ) from charmhelpers.payload.execd import execd_preinstall @@ -99,12 +101,14 @@ def install(): @hooks.hook('config-changed') @restart_on_change(restart_map()) +@synchronize_ca_if_changed(fatal=False) def config_changed(): if config('prefer-ipv6'): setup_ipv6() sync_db_with_multi_ipv6_addresses(config('database'), config('database-user')) + unison.ensure_user(user=SSH_USER, group='juju_keystone') unison.ensure_user(user=SSH_USER, group='keystone') homedir = unison.get_homedir(SSH_USER) if not os.path.isdir(homedir): @@ -127,20 +131,10 @@ def config_changed(): configure_https() CONFIGS.write_all() - if is_elected_leader(CLUSTER_RES): - migrate_database() - ensure_initial_admin(config) - log('Firing identity_changed hook for all related services.') - # HTTPS may have been set - so fire all identity relations - # again - for r_id in relation_ids('identity-service'): - for unit in 
relation_list(r_id): - identity_changed(relation_id=r_id, - remote_unit=unit, sync_certs=False) - - synchronize_ca() - - [cluster_joined(rid) for rid in relation_ids('cluster')] + # Update relations since SSL may have been configured. If we have peer + # units we can rely on the sync to do this in cluster relation. + if is_elected_leader(CLUSTER_RES) and not peer_units(): + update_all_identity_relation_units() @hooks.hook('shared-db-relation-joined') @@ -173,8 +167,23 @@ def pgsql_db_joined(): relation_set(database=config('database')) +def update_all_identity_relation_units(): + try: + migrate_database() + except Exception as exc: + log("Database initialisation failed (%s) - db not ready?" % (exc), + level=WARNING) + else: + ensure_initial_admin(config) + log('Firing identity_changed hook for all related services.') + for rid in relation_ids('identity-service'): + for unit in related_units(rid): + identity_changed(relation_id=rid, remote_unit=unit) + + @hooks.hook('shared-db-relation-changed') @restart_on_change(restart_map()) +@synchronize_ca_if_changed() def db_changed(): if 'shared-db' not in CONFIGS.complete_contexts(): log('shared-db relation incomplete. Peer not ready?') @@ -189,34 +198,28 @@ def db_changed(): if allowed_units and local_unit() not in allowed_units.split(): log('Allowed_units list provided and this unit not present') return - migrate_database() - ensure_initial_admin(config) # Ensure any existing service entries are updated in the # new database backend - for rid in relation_ids('identity-service'): - for unit in related_units(rid): - identity_changed(relation_id=rid, remote_unit=unit) + update_all_identity_relation_units() @hooks.hook('pgsql-db-relation-changed') @restart_on_change(restart_map()) +@synchronize_ca_if_changed() def pgsql_db_changed(): if 'pgsql-db' not in CONFIGS.complete_contexts(): log('pgsql-db relation incomplete. 
Peer not ready?') else: CONFIGS.write(KEYSTONE_CONF) if is_elected_leader(CLUSTER_RES): - migrate_database() - ensure_initial_admin(config) # Ensure any existing service entries are updated in the # new database backend - for rid in relation_ids('identity-service'): - for unit in related_units(rid): - identity_changed(relation_id=rid, remote_unit=unit) + update_all_identity_relation_units() @hooks.hook('identity-service-relation-changed') -def identity_changed(relation_id=None, remote_unit=None, sync_certs=True): +@synchronize_ca_if_changed() +def identity_changed(relation_id=None, remote_unit=None): notifications = {} if is_elected_leader(CLUSTER_RES): # Catch database not configured error and defer until db ready @@ -235,8 +238,6 @@ def identity_changed(relation_id=None, remote_unit=None, sync_certs=True): raise CONFIGS.write_all() - if sync_certs: - synchronize_ca() settings = relation_get(rid=relation_id, unit=remote_unit) service = settings.get('service', None) @@ -286,33 +287,55 @@ def cluster_joined(relation_id=None): relation_settings={'private-address': private_addr}) +@synchronize_ca_if_changed() +def identity_updates_with_ssl_sync(): + CONFIGS.write_all() + update_all_identity_relation_units() + + +@synchronize_ca_if_changed(force=True) +def identity_updates_with_forced_ssl_sync(): + identity_updates_with_ssl_sync() + + @hooks.hook('cluster-relation-changed', 'cluster-relation-departed') @restart_on_change(restart_map(), stopstart=True) def cluster_changed(): check_peer_actions() + # Uncomment the following to print out all cluster relation settings in + # log (debug only). 
+ """ + rels = ["%s:%s" % (k, v) for k, v in relation_get().iteritems()] + tag = '\n[debug:%s]' % (remote_unit()) + log("PEER RELATION SETTINGS (unit=%s): %s" % (remote_unit(), + tag.join(rels)), + level=DEBUG) + """ + # NOTE(jamespage) re-echo passwords for peer storage - echo_whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master'] + echo_whitelist = ['_passwd', 'identity-service:'] unison.ssh_authorized_peers(user=SSH_USER, group='keystone', peer_interface='cluster', ensure_local_user=True) - CONFIGS.write_all() - # If we have a pending cluster formation, defer following actions to the ha - # relation hook instead. - if is_pending_clustered(): - log("Waiting for ha to be 'clustered' - deferring identity-updates " - "and cert sync to ha relation", level=INFO) - return + synced_units = relation_get(attribute='ssl-synced-units', + unit=local_unit()) + if not synced_units or (remote_unit() not in synced_units): + log("Peer '%s' not in list of synced units (%s)" % + (remote_unit(), synced_units), level=INFO) + identity_updates_with_forced_ssl_sync() + else: + identity_updates_with_ssl_sync() - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - identity_changed(relation_id=r_id, remote_unit=unit, - sync_certs=False) + # If we are cert master ignore what other peers have to say + if not is_ssl_cert_master(): + echo_whitelist.append('ssl-cert-master') - synchronize_ca() + # ssl cert sync must be done BEFORE this to reduce the risk of feedback + # loops in cluster relation peer_echo(includes=echo_whitelist) @@ -367,20 +390,16 @@ def ha_joined(): @hooks.hook('ha-relation-changed') @restart_on_change(restart_map()) +@synchronize_ca_if_changed() def ha_changed(): clustered = relation_get('clustered') CONFIGS.write_all() - if clustered is not None and is_elected_leader(CLUSTER_RES): + if clustered and is_elected_leader(CLUSTER_RES): ensure_initial_admin(config) log('Cluster configured, notifying other services and updating ' 'keystone 
endpoint configuration') - for rid in relation_ids('identity-service'): - for unit in related_units(rid): - identity_changed(relation_id=rid, remote_unit=unit, - sync_certs=False) - - synchronize_ca() + update_all_identity_relation_units() @hooks.hook('identity-admin-relation-changed') @@ -399,6 +418,7 @@ def admin_relation_changed(): relation_set(**relation_data) +@synchronize_ca_if_changed() def configure_https(): ''' Enables SSL API Apache config if appropriate and kicks identity-service @@ -417,6 +437,7 @@ def configure_https(): @hooks.hook('upgrade-charm') @restart_on_change(restart_map(), stopstart=True) +@synchronize_ca_if_changed() def upgrade_charm(): apt_install(filter_installed_packages(determine_packages())) unison.ssh_authorized_peers(user=SSH_USER, @@ -424,17 +445,12 @@ def upgrade_charm(): peer_interface='cluster', ensure_local_user=True) if is_elected_leader(CLUSTER_RES): - log('Cluster leader - ensuring endpoint configuration' - ' is up to date') + log('Cluster leader - ensuring endpoint configuration is up to ' + 'date', level=DEBUG) time.sleep(10) - ensure_initial_admin(config) - # Deal with interface changes for icehouse - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - identity_changed(relation_id=r_id, - remote_unit=unit, sync_certs=False) + update_all_identity_relation_units() + CONFIGS.write_all() - synchronize_ca() def main(): diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index 6ee620c9..8e9fd0a8 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -1,13 +1,15 @@ #!/usr/bin/python import glob import grp -import subprocess +import hashlib import os import pwd -import uuid -import urlparse import shutil +import subprocess +import threading import time +import urlparse +import uuid from base64 import b64encode from collections import OrderedDict @@ -56,10 +58,10 @@ from charmhelpers.core.hookenv import ( config, log, local_unit, + related_units, relation_get, relation_set, 
relation_ids, - unit_get, DEBUG, INFO, WARNING, @@ -130,6 +132,7 @@ SSL_DIR = '/var/lib/keystone/juju_ssl/' SSL_CA_NAME = 'Ubuntu Cloud' CLUSTER_RES = 'grp_ks_vips' SSH_USER = 'juju_keystone' +SSL_SYNC_SEMAPHORE = threading.Semaphore() BASE_RESOURCE_MAP = OrderedDict([ (KEYSTONE_CONF, { @@ -701,7 +704,7 @@ def is_ssl_cert_master(): master = relation_get(attribute='ssl-cert-master', rid=rid, unit=local_unit()) - if master and master == unit_get('private-address'): + if master and master == local_unit(): return True return False @@ -720,6 +723,39 @@ def unison_sync(paths_to_sync): fatal=True) +def is_sync_master(): + if not peer_units(): + log("Not syncing certs since there are no peer units.", level=INFO) + return False + + # If no ssl master elected and we are cluster leader, elect this unit. + if is_elected_leader(CLUSTER_RES): + master = [] + for rid in relation_ids('cluster'): + for unit in related_units(rid): + m = relation_get(rid=rid, unit=unit, + attribute='ssl-cert-master') + if m is not None: + master.append(m) + + master = set(master) + if not master or ('unknown' in master and len(master) == 1): + log("Electing this unit as ssl-cert-master", level=DEBUG) + for rid in relation_ids('cluster'): + settings = {'ssl-cert-master': local_unit(), + 'ssl-synced-units': None} + relation_set(relation_id=rid, relation_settings=settings) + + # Return now and wait for cluster-relation-changed for sync. + return False + + if not is_ssl_cert_master(): + log("Not ssl cert master - skipping sync", level=INFO) + return False + + return True + + def synchronize_ca(fatal=True): """Broadcast service credentials to peers. @@ -733,27 +769,6 @@ def synchronize_ca(fatal=True): """ paths_to_sync = [SYNC_FLAGS_DIR] - if not peer_units(): - log("Not syncing certs since there are no peer units.", level=INFO) - return - - # If no ssl master elected and we are cluster leader, elect this unit. 
- if is_elected_leader(CLUSTER_RES): - master = relation_get(attribute='ssl-cert-master') - if not master or master == 'unknown': - log("Electing this unit as ssl-cert-master", level=DEBUG) - for rid in relation_ids('cluster'): - relation_set(relation_id=rid, - relation_settings={'ssl-cert-master': - unit_get('private-address'), - 'trigger': str(uuid.uuid4())}) - # Return now and wait for echo before continuing. - return - - if not is_ssl_cert_master(): - log("Not ssl cert master - skipping sync", level=INFO) - return - if str_is_true(config('https-service-endpoints')): log("Syncing all endpoint certs since https-service-endpoints=True", level=DEBUG) @@ -785,7 +800,6 @@ def synchronize_ca(fatal=True): # new ssl keys. create_peer_service_actions('restart', ['apache2']) create_service_action('update-ca-certificates') - try: unison_sync(paths_to_sync) except: @@ -795,23 +809,94 @@ def synchronize_ca(fatal=True): log("Sync failed but fatal=False", level=INFO) return - trigger = str(uuid.uuid4()) - log("Sending restart-services-trigger=%s to all peers" % (trigger), - level=DEBUG) - settings = {'restart-services-trigger': trigger} - # Cleanup shutil.rmtree(SYNC_FLAGS_DIR) mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775) - # If we are the sync master but no longer leader then re-elect master. - if not is_elected_leader(CLUSTER_RES): - log("Re-electing ssl cert master.", level=INFO) - settings['ssl-cert-master'] = 'unknown' + trigger = str(uuid.uuid4()) + log("Sending restart-services-trigger=%s to all peers" % (trigger), + level=DEBUG) - log("Sync complete - sending peer info", level=DEBUG) - for rid in relation_ids('cluster'): - relation_set(relation_id=rid, **settings) + log("Sync complete", level=DEBUG) + return {'restart-services-trigger': trigger, + 'ssl-synced-units': peer_units()} + + +def update_hash_from_path(hash, path, recurse_depth=10): + """Recurse through path and update the provided hash for every file found. 
+ """ + if not recurse_depth: + log("Max recursion depth (%s) reached for update_hash_from_path() at " + "path='%s' - not going any deeper" % (recurse_depth, path), + level=WARNING) + return sum + + for p in glob.glob("%s/*" % path): + if os.path.isdir(p): + update_hash_from_path(hash, p, recurse_depth=recurse_depth - 1) + else: + with open(p, 'r') as fd: + hash.update(fd.read()) + + +def synchronize_ca_if_changed(force=False, fatal=True): + """Decorator to perform ssl cert sync if decorated function modifies them + in any way. + + If force is True a sync is done regardless. + """ + def inner_synchronize_ca_if_changed1(f): + def inner_synchronize_ca_if_changed2(*args, **kwargs): + if not is_sync_master(): + return f(*args, **kwargs) + + peer_settings = {} + try: + # Ensure we don't do a double sync if we are nested. + if not force and SSL_SYNC_SEMAPHORE.acquire(blocking=0): + hash1 = hashlib.sha256() + for path in [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]: + update_hash_from_path(hash1, path) + + hash1 = hash1.hexdigest() + + ret = f(*args, **kwargs) + + hash2 = hashlib.sha256() + for path in [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]: + update_hash_from_path(hash2, path) + + hash2 = hash2.hexdigest() + if hash1 != hash2: + log("SSL certs have changed - syncing peers", + level=DEBUG) + peer_settings = synchronize_ca(fatal=fatal) + else: + log("SSL certs have not changed - skipping sync", + level=DEBUG) + else: + ret = f(*args, **kwargs) + if force: + log("Doing forced ssl cert sync", level=DEBUG) + peer_settings = synchronize_ca(fatal=fatal) + + # If we are the sync master but no longer leader then re-elect + # master. 
+ if not is_elected_leader(CLUSTER_RES): + log("Re-electing ssl cert master.", level=INFO) + peer_settings['ssl-cert-master'] = 'unknown' + + for rid in relation_ids('cluster'): + relation_set(relation_id=rid, + relation_settings=peer_settings) + + return ret + finally: + SSL_SYNC_SEMAPHORE.release() + + return inner_synchronize_ca_if_changed2 + + return inner_synchronize_ca_if_changed1 def get_ca(user='keystone', group='keystone'): @@ -838,8 +923,11 @@ def get_ca(user='keystone', group='keystone'): # Tag this host as the ssl cert master. if is_clustered() or peer_units(): - peer_store(key='ssl-cert-master', - value=unit_get('private-address')) + for rid in relation_ids('cluster'): + relation_set(relation_id=rid, + relation_settings={'ssl-cert-master': + local_unit(), + 'synced-units': None}) ssl.CA_SINGLETON.append(ca) @@ -1149,13 +1237,3 @@ def send_notifications(data, force=False): level=DEBUG) for rid in rel_ids: relation_set(relation_id=rid, relation_settings=_notifications) - - -def is_pending_clustered(): - """If we have HA relations but are not yet 'clustered' return True.""" - for r_id in (relation_ids('ha') or []): - for unit in (relation_list(r_id) or []): - if not relation_get('clustered', rid=r_id, unit=unit): - return True - - return False diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index 3829f648..dd080f1d 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -1,6 +1,7 @@ from mock import call, patch, MagicMock import os import json +import uuid from test_utils import CharmTestCase @@ -34,6 +35,7 @@ TO_PATCH = [ 'relation_set', 'relation_get', 'related_units', + 'remote_unit', 'unit_get', 'peer_echo', # charmhelpers.core.host @@ -54,7 +56,7 @@ TO_PATCH = [ 'migrate_database', 'ensure_initial_admin', 'add_service_to_keystone', - 'synchronize_ca', + 'synchronize_ca_if_changed', # other 'check_call', 'execd_preinstall', @@ -158,8 +160,12 @@ class 
KeystoneRelationTests(CharmTestCase): 'Attempting to associate a postgresql database when there ' 'is already associated a mysql one') + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') - def test_db_changed_missing_relation_data(self, configs): + def test_db_changed_missing_relation_data(self, configs, mock_peer_units, + mock_log): + mock_peer_units.return_value = None configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] hooks.db_changed() @@ -190,9 +196,13 @@ class KeystoneRelationTests(CharmTestCase): configs.write = MagicMock() hooks.pgsql_db_changed() + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') - def test_db_changed_allowed(self, identity_changed, configs): + def test_db_changed_allowed(self, identity_changed, configs, + mock_peer_units, mock_log): + mock_peer_units.return_value = None self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -205,9 +215,13 @@ class KeystoneRelationTests(CharmTestCase): relation_id='identity-service:0', remote_unit='unit/0') + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') - def test_db_changed_not_allowed(self, identity_changed, configs): + def test_db_changed_not_allowed(self, identity_changed, configs, + mock_peer_units, mock_log): + mock_peer_units.return_value = None self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -218,9 +232,13 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(self.ensure_initial_admin.called) self.assertFalse(identity_changed.called) + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') - def test_postgresql_db_changed(self, 
identity_changed, configs): + def test_postgresql_db_changed(self, identity_changed, configs, + mock_peer_units, mock_log): + mock_peer_units.return_value = None self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -233,6 +251,7 @@ class KeystoneRelationTests(CharmTestCase): relation_id='identity-service:0', remote_unit='unit/0') + @patch('keystone_utils.is_sync_master') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -243,7 +262,8 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_no_openstack_upgrade_leader( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions): + ensure_permissions, is_sync_master): + is_sync_master.return_value = False self.openstack_upgrade_available.return_value = False self.is_elected_leader.return_value = True self.relation_ids.return_value = ['identity-service:0'] @@ -263,9 +283,9 @@ class KeystoneRelationTests(CharmTestCase): 'Firing identity_changed hook for all related services.') identity_changed.assert_called_with( relation_id='identity-service:0', - remote_unit='unit/0', - sync_certs=False) + remote_unit='unit/0') + @patch('keystone_utils.is_sync_master') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -276,7 +296,8 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_no_openstack_upgrade_not_leader( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions): + ensure_permissions, is_sync_master): + is_sync_master.return_value = False self.openstack_upgrade_available.return_value = False self.is_elected_leader.return_value = False @@ -292,6 +313,7 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(self.ensure_initial_admin.called) self.assertFalse(identity_changed.called) + 
@patch('keystone_utils.is_sync_master') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -302,7 +324,8 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_with_openstack_upgrade( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions): + ensure_permissions, is_sync_master): + is_sync_master.return_value = False self.openstack_upgrade_available.return_value = True self.is_elected_leader.return_value = True self.relation_ids.return_value = ['identity-service:0'] @@ -324,13 +347,32 @@ class KeystoneRelationTests(CharmTestCase): 'Firing identity_changed hook for all related services.') identity_changed.assert_called_with( relation_id='identity-service:0', - remote_unit='unit/0', - sync_certs=False) + remote_unit='unit/0') + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') + @patch('keystone_utils.relation_ids') + @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.is_sync_master') + @patch('keystone_utils.update_hash_from_path') + @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'hashlib') @patch.object(hooks, 'send_notifications') def test_identity_changed_leader(self, mock_send_notifications, - mock_hashlib): + mock_hashlib, mock_synchronize_ca, + mock_update_hash_from_path, + mock_is_sync_master, + mock_is_elected_leader, + mock_relation_ids, mock_peer_units, + mock_log): + mock_peer_units.return_value = None + mock_relation_ids.return_value = [] + mock_is_sync_master.return_value = True + mock_is_elected_leader.return_value = True + # Ensure always returns diff + mock_update_hash_from_path.side_effect = \ + lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) + self.is_elected_leader.return_value = True hooks.identity_changed( relation_id='identity-service:0', @@ -338,9 +380,12 @@ class KeystoneRelationTests(CharmTestCase): 
self.add_service_to_keystone.assert_called_with( 'identity-service:0', 'unit/0') - self.assertTrue(self.synchronize_ca.called) + self.assertTrue(mock_synchronize_ca.called) - def test_identity_changed_no_leader(self): + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') + def test_identity_changed_no_leader(self, mock_peer_units, mock_log): + mock_peer_units.return_value = None self.is_elected_leader.return_value = False hooks.identity_changed( relation_id='identity-service:0', @@ -356,20 +401,34 @@ class KeystoneRelationTests(CharmTestCase): user=self.ssh_user, group='juju_keystone', peer_interface='cluster', ensure_local_user=True) - @patch.object(hooks, 'is_pending_clustered') + @patch('keystone_utils.log') + @patch('keystone_utils.relation_ids') + @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.is_sync_master') + @patch('keystone_utils.update_hash_from_path') + @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'check_peer_actions') @patch.object(unison, 'ssh_authorized_peers') @patch.object(hooks, 'CONFIGS') def test_cluster_changed(self, configs, ssh_authorized_peers, - check_peer_actions, is_pending_clustered): - is_pending_clustered.return_value = False + check_peer_actions, + mock_synchronize_ca, mock_update_hash_from_path, + mock_is_sync_master, mock_is_elected_leader, + mock_relation_ids, mock_log): + mock_relation_ids.return_value = [] + mock_is_sync_master.return_value = True + mock_is_elected_leader.return_value = True + # Ensure always returns diff + mock_update_hash_from_path.side_effect = \ + lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) + hooks.cluster_changed() whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master'] self.peer_echo.assert_called_with(includes=whitelist) ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='keystone', peer_interface='cluster', ensure_local_user=True) - self.assertTrue(self.synchronize_ca.called) + 
self.assertTrue(mock_synchronize_ca.called) self.assertTrue(configs.write_all.called) def test_ha_joined(self): @@ -419,18 +478,32 @@ class KeystoneRelationTests(CharmTestCase): } self.relation_set.assert_called_with(**args) + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') + @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'CONFIGS') - def test_ha_relation_changed_not_clustered_not_leader(self, configs): + def test_ha_relation_changed_not_clustered_not_leader(self, configs, + mock_synchronize_ca, + mock_peer_units, + mock_log): + mock_peer_units.return_value = None self.relation_get.return_value = False self.is_elected_leader.return_value = False hooks.ha_changed() self.assertTrue(configs.write_all.called) + self.assertTrue(mock_synchronize_ca.called) + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') + @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'CONFIGS') - def test_ha_relation_changed_clustered_leader( - self, configs, identity_changed): + def test_ha_relation_changed_clustered_leader(self, configs, + identity_changed, + mock_synchronize_ca, + mock_peer_units, mock_log): + mock_peer_units.return_value = None self.relation_get.return_value = True self.is_elected_leader.return_value = True self.relation_ids.return_value = ['identity-service:0'] @@ -443,11 +516,14 @@ class KeystoneRelationTests(CharmTestCase): 'keystone endpoint configuration') identity_changed.assert_called_with( relation_id='identity-service:0', - remote_unit='unit/0', - sync_certs=False) + remote_unit='unit/0') + self.assertTrue(mock_synchronize_ca.called) + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') - def test_configure_https_enable(self, configs): + def test_configure_https_enable(self, configs, mock_peer_units, mock_log): + mock_peer_units.return_value = None configs.complete_contexts = MagicMock() 
configs.complete_contexts.return_value = ['https'] configs.write = MagicMock() @@ -457,8 +533,11 @@ class KeystoneRelationTests(CharmTestCase): cmd = ['a2ensite', 'openstack_https_frontend'] self.check_call.assert_called_with(cmd) + @patch('keystone_utils.log') + @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') - def test_configure_https_disable(self, configs): + def test_configure_https_disable(self, configs, mock_peer_units, mock_log): + mock_peer_units.return_value = None configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [''] configs.write = MagicMock() @@ -468,8 +547,24 @@ class KeystoneRelationTests(CharmTestCase): cmd = ['a2dissite', 'openstack_https_frontend'] self.check_call.assert_called_with(cmd) + @patch('keystone_utils.relation_ids') + @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.is_sync_master') + @patch('keystone_utils.update_hash_from_path') + @patch('keystone_utils.synchronize_ca') @patch.object(unison, 'ssh_authorized_peers') - def test_upgrade_charm_leader(self, ssh_authorized_peers): + def test_upgrade_charm_leader(self, ssh_authorized_peers, + mock_synchronize_ca, + mock_update_hash_from_path, + mock_is_sync_master, mock_is_elected_leader, + mock_relation_ids): + mock_relation_ids.return_value = [] + mock_is_sync_master.return_value = True + mock_is_elected_leader.return_value = True + # Ensure always returns diff + mock_update_hash_from_path.side_effect = \ + lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) + self.is_elected_leader.return_value = True self.filter_installed_packages.return_value = [] hooks.upgrade_charm() @@ -477,14 +572,31 @@ class KeystoneRelationTests(CharmTestCase): ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='keystone', peer_interface='cluster', ensure_local_user=True) - self.assertTrue(self.synchronize_ca.called) + self.assertTrue(mock_synchronize_ca.called) self.log.assert_called_with( 'Cluster leader - ensuring 
endpoint configuration' ' is up to date') self.assertTrue(self.ensure_initial_admin.called) + @patch('keystone_utils.relation_ids') + @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.is_sync_master') + @patch('keystone_utils.update_hash_from_path') + @patch('keystone_utils.synchronize_ca') @patch.object(unison, 'ssh_authorized_peers') - def test_upgrade_charm_not_leader(self, ssh_authorized_peers): + def test_upgrade_charm_not_leader(self, ssh_authorized_peers, + mock_synchronize_ca, + mock_update_hash_from_path, + mock_is_sync_master, + mock_is_elected_leader, + mock_relation_ids): + mock_relation_ids.return_value = [] + mock_is_sync_master.return_value = True + mock_is_elected_leader.return_value = True + # Ensure always returns diff + mock_update_hash_from_path.side_effect = \ + lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) + self.is_elected_leader.return_value = False self.filter_installed_packages.return_value = [] hooks.upgrade_charm() @@ -492,6 +604,6 @@ class KeystoneRelationTests(CharmTestCase): ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='keystone', peer_interface='cluster', ensure_local_user=True) - self.assertTrue(self.synchronize_ca.called) + self.assertTrue(mock_synchronize_ca.called) self.assertFalse(self.log.called) self.assertFalse(self.ensure_initial_admin.called) From fa440c145c235aaf34edecefe184a0c0319d512a Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 12 Jan 2015 12:45:22 +0000 Subject: [PATCH 05/13] Removed sync-master logi --- hooks/keystone_context.py | 17 ++--- hooks/keystone_hooks.py | 10 +-- hooks/keystone_utils.py | 152 ++++++++++++-------------------------- 3 files changed, 59 insertions(+), 120 deletions(-) diff --git a/hooks/keystone_context.py b/hooks/keystone_context.py index cfb7eb72..12a15f7a 100644 --- a/hooks/keystone_context.py +++ b/hooks/keystone_context.py @@ -8,7 +8,8 @@ from charmhelpers.contrib.openstack import context from 
charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, - determine_api_port + determine_api_port, + is_elected_leader, ) from charmhelpers.core.hookenv import ( @@ -37,7 +38,7 @@ class ApacheSSLContext(context.ApacheSSLContext): from keystone_utils import ( SSH_USER, get_ca, - is_ssl_cert_master, + CLUSTER_RES, ensure_permissions, ) @@ -48,9 +49,8 @@ class ApacheSSLContext(context.ApacheSSLContext): ensure_permissions(ssl_dir, user=SSH_USER, group='keystone', perms=perms) - if not is_ssl_cert_master(): - log("Not leader or cert master so skipping apache cert config", - level=INFO) + if not is_elected_leader(CLUSTER_RES): + log("Not leader - skipping apache cert config", level=INFO) return log("Creating apache ssl certs in %s" % (ssl_dir), level=INFO) @@ -66,13 +66,12 @@ class ApacheSSLContext(context.ApacheSSLContext): from keystone_utils import ( SSH_USER, get_ca, - is_ssl_cert_master, + CLUSTER_RES, ensure_permissions, ) - if not is_ssl_cert_master(): - log("Not leader or cert master so skipping apache ca config", - level=INFO) + if not is_elected_leader(CLUSTER_RES): + log("Not leader - skipping apache cert config", level=INFO) return ca = get_ca(user=SSH_USER) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 21a476ac..82a130f1 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -64,7 +64,6 @@ from keystone_utils import ( check_peer_actions, CA_CERT_PATH, ensure_permissions, - is_ssl_cert_master, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -307,7 +306,8 @@ def cluster_changed(): # Uncomment the following to print out all cluster relation settings in # log (debug only). 
""" - rels = ["%s:%s" % (k, v) for k, v in relation_get().iteritems()] + settings = relation_get() + rels = ["%s:%s" % (k, v) for k, v in settings.iteritems()] tag = '\n[debug:%s]' % (remote_unit()) log("PEER RELATION SETTINGS (unit=%s): %s" % (remote_unit(), tag.join(rels)), @@ -330,9 +330,7 @@ def cluster_changed(): else: identity_updates_with_ssl_sync() - # If we are cert master ignore what other peers have to say - if not is_ssl_cert_master(): - echo_whitelist.append('ssl-cert-master') + echo_whitelist.append('ssl-synced-units') # ssl cert sync must be done BEFORE this to reduce the risk of feedback # loops in cluster relation @@ -372,7 +370,7 @@ def ha_joined(): vip_group.append(vip_key) if len(vip_group) >= 1: - relation_set(groups={'grp_ks_vips': ' '.join(vip_group)}) + relation_set(groups={CLUSTER_RES: ' '.join(vip_group)}) init_services = { 'res_ks_haproxy': 'haproxy' diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index 8e9fd0a8..b5021fba 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -4,7 +4,7 @@ import grp import hashlib import os import pwd -import shutil +import re import subprocess import threading import time @@ -19,7 +19,6 @@ from charmhelpers.contrib.hahelpers.cluster import( is_elected_leader, determine_api_port, https, - is_clustered, peer_units, ) @@ -58,7 +57,6 @@ from charmhelpers.core.hookenv import ( config, log, local_unit, - related_units, relation_get, relation_set, relation_ids, @@ -400,7 +398,7 @@ def create_endpoint_template(region, service, publicurl, adminurl, up_to_date = True for k in ['publicurl', 'adminurl', 'internalurl']: - if ep[k] != locals()[k]: + if ep.get(k) != locals()[k]: up_to_date = False if up_to_date: @@ -651,26 +649,37 @@ def check_peer_actions(): if restart and os.path.isdir(SYNC_FLAGS_DIR): for flagfile in glob.glob(os.path.join(SYNC_FLAGS_DIR, '*')): flag = os.path.basename(flagfile) - service = flag.partition('.')[0] - action = flag.partition('.')[2] - - if action == 
'restart': - log("Running action='%s' on service '%s'" % - (action, service), level=DEBUG) - service_restart(service) - elif action == 'start': - log("Running action='%s' on service '%s'" % - (action, service), level=DEBUG) - service_start(service) - elif action == 'stop': - log("Running action='%s' on service '%s'" % - (action, service), level=DEBUG) - service_stop(service) - elif flag == 'update-ca-certificates': - log("Running update-ca-certificates", level=DEBUG) - subprocess.check_call(['update-ca-certificates']) + key = re.compile("^(.+)?\.(.+)?\.(.+)") + res = re.search(key, flag) + if res: + source = res. group(1) + service = res. group(2) + action = res. group(3) else: - log("Unknown action flag=%s" % (flag), level=WARNING) + key = re.compile("^(.+)?\.(.+)?") + res = re.search(key, flag) + source = res. group(1) + action = res. group(2) + + # Don't execute actions requested byu this unit. + if local_unit().replace('.', '-') != source: + if action == 'restart': + log("Running action='%s' on service '%s'" % + (action, service), level=DEBUG) + service_restart(service) + elif action == 'start': + log("Running action='%s' on service '%s'" % + (action, service), level=DEBUG) + service_start(service) + elif action == 'stop': + log("Running action='%s' on service '%s'" % + (action, service), level=DEBUG) + service_stop(service) + elif action == 'update-ca-certificates': + log("Running update-ca-certificates", level=DEBUG) + subprocess.check_call(['update-ca-certificates']) + else: + log("Unknown action flag=%s" % (flag), level=WARNING) os.remove(flagfile) @@ -683,33 +692,22 @@ def create_peer_service_actions(action, services): synced. 
""" for service in services: - flagfile = os.path.join(SYNC_FLAGS_DIR, '%s.%s' % - (service.strip(), action)) + flagfile = os.path.join(SYNC_FLAGS_DIR, '%s.%s.%s' % + (local_unit().replace('/', '-'), + service.strip(), action)) log("Creating action %s" % (flagfile), level=DEBUG) write_file(flagfile, content='', owner=SSH_USER, group='keystone', perms=0o644) def create_service_action(action): + action = "%s.%s" % (local_unit().replace('/', '-'), action) flagfile = os.path.join(SYNC_FLAGS_DIR, action) log("Creating action %s" % (flagfile), level=DEBUG) write_file(flagfile, content='', owner=SSH_USER, group='keystone', perms=0o644) -def is_ssl_cert_master(): - """Return True if this unit is ssl cert master.""" - master = None - for rid in relation_ids('cluster'): - master = relation_get(attribute='ssl-cert-master', rid=rid, - unit=local_unit()) - - if master and master == local_unit(): - return True - - return False - - @retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError) def unison_sync(paths_to_sync): """Do unison sync and retry a few times if it fails since peers may not be @@ -723,39 +721,6 @@ def unison_sync(paths_to_sync): fatal=True) -def is_sync_master(): - if not peer_units(): - log("Not syncing certs since there are no peer units.", level=INFO) - return False - - # If no ssl master elected and we are cluster leader, elect this unit. 
- if is_elected_leader(CLUSTER_RES): - master = [] - for rid in relation_ids('cluster'): - for unit in related_units(rid): - m = relation_get(rid=rid, unit=unit, - attribute='ssl-cert-master') - if m is not None: - master.append(m) - - master = set(master) - if not master or ('unknown' in master and len(master) == 1): - log("Electing this unit as ssl-cert-master", level=DEBUG) - for rid in relation_ids('cluster'): - settings = {'ssl-cert-master': local_unit(), - 'ssl-synced-units': None} - relation_set(relation_id=rid, relation_settings=settings) - - # Return now and wait for cluster-relation-changed for sync. - return False - - if not is_ssl_cert_master(): - log("Not ssl cert master - skipping sync", level=INFO) - return False - - return True - - def synchronize_ca(fatal=True): """Broadcast service credentials to peers. @@ -785,16 +750,8 @@ def synchronize_ca(fatal=True): log("Nothing to sync - skipping", level=DEBUG) return - # If we are sync master proceed even if we are not leader since we are - # most likely to have up-to-date certs. If not leader we will re-elect once - # synced. This is done to avoid being affected by leader changing before - # all units have synced certs. - - # Clear any existing flags - if os.path.isdir(SYNC_FLAGS_DIR): - shutil.rmtree(SYNC_FLAGS_DIR) - - mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775) + if not os.path.isdir(SYNC_FLAGS_DIR): + mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775) # We need to restart peer apache services to ensure they have picked up # new ssl keys. 
@@ -809,10 +766,6 @@ def synchronize_ca(fatal=True): log("Sync failed but fatal=False", level=INFO) return - # Cleanup - shutil.rmtree(SYNC_FLAGS_DIR) - mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775) - trigger = str(uuid.uuid4()) log("Sending restart-services-trigger=%s to all peers" % (trigger), level=DEBUG) @@ -829,7 +782,7 @@ def update_hash_from_path(hash, path, recurse_depth=10): log("Max recursion depth (%s) reached for update_hash_from_path() at " "path='%s' - not going any deeper" % (recurse_depth, path), level=WARNING) - return sum + return for p in glob.glob("%s/*" % path): if os.path.isdir(p): @@ -847,13 +800,16 @@ def synchronize_ca_if_changed(force=False, fatal=True): """ def inner_synchronize_ca_if_changed1(f): def inner_synchronize_ca_if_changed2(*args, **kwargs): - if not is_sync_master(): - return f(*args, **kwargs) - - peer_settings = {} + # Only sync master can do sync. Ensure (a) we are not nested and + # (b) a master is elected and we are it. try: + acquired = SSL_SYNC_SEMAPHORE.acquire(blocking=0) + if not acquired or not is_elected_leader(CLUSTER_RES): + return f(*args, **kwargs) + + peer_settings = {} # Ensure we don't do a double sync if we are nested. - if not force and SSL_SYNC_SEMAPHORE.acquire(blocking=0): + if not force: hash1 = hashlib.sha256() for path in [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]: update_hash_from_path(hash1, path) @@ -880,12 +836,6 @@ def synchronize_ca_if_changed(force=False, fatal=True): log("Doing forced ssl cert sync", level=DEBUG) peer_settings = synchronize_ca(fatal=fatal) - # If we are the sync master but no longer leader then re-elect - # master. 
- if not is_elected_leader(CLUSTER_RES): - log("Re-electing ssl cert master.", level=INFO) - peer_settings['ssl-cert-master'] = 'unknown' - for rid in relation_ids('cluster'): relation_set(relation_id=rid, relation_settings=peer_settings) @@ -921,14 +871,6 @@ def get_ca(user='keystone', group='keystone'): '%s' % SSL_DIR]) subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR]) - # Tag this host as the ssl cert master. - if is_clustered() or peer_units(): - for rid in relation_ids('cluster'): - relation_set(relation_id=rid, - relation_settings={'ssl-cert-master': - local_unit(), - 'synced-units': None}) - ssl.CA_SINGLETON.append(ca) return ssl.CA_SINGLETON[0] From 8ec5688fe446ce5970ceb58371599b8a0b247043 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 12 Jan 2015 17:02:08 +0000 Subject: [PATCH 06/13] sync only fatal=True in cluster relation --- hooks/keystone_hooks.py | 8 ++++---- hooks/keystone_utils.py | 11 ++++++----- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 82a130f1..92ca76a1 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -100,7 +100,7 @@ def install(): @hooks.hook('config-changed') @restart_on_change(restart_map()) -@synchronize_ca_if_changed(fatal=False) +@synchronize_ca_if_changed() def config_changed(): if config('prefer-ipv6'): setup_ipv6() @@ -286,13 +286,13 @@ def cluster_joined(relation_id=None): relation_settings={'private-address': private_addr}) -@synchronize_ca_if_changed() +@synchronize_ca_if_changed(fatal=True) def identity_updates_with_ssl_sync(): CONFIGS.write_all() update_all_identity_relation_units() -@synchronize_ca_if_changed(force=True) +@synchronize_ca_if_changed(force=True, fatal=True) def identity_updates_with_forced_ssl_sync(): identity_updates_with_ssl_sync() @@ -416,7 +416,7 @@ def admin_relation_changed(): relation_set(**relation_data) -@synchronize_ca_if_changed() +@synchronize_ca_if_changed(fatal=True) def 
configure_https(): ''' Enables SSL API Apache config if appropriate and kicks identity-service diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index b5021fba..8199d641 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -721,7 +721,7 @@ def unison_sync(paths_to_sync): fatal=True) -def synchronize_ca(fatal=True): +def synchronize_ca(fatal=False): """Broadcast service credentials to peers. By default a failure to sync is fatal and will result in a raised @@ -792,7 +792,7 @@ def update_hash_from_path(hash, path, recurse_depth=10): hash.update(fd.read()) -def synchronize_ca_if_changed(force=False, fatal=True): +def synchronize_ca_if_changed(force=False, fatal=False): """Decorator to perform ssl cert sync if decorated function modifies them in any way. @@ -836,9 +836,10 @@ def synchronize_ca_if_changed(force=False, fatal=True): log("Doing forced ssl cert sync", level=DEBUG) peer_settings = synchronize_ca(fatal=fatal) - for rid in relation_ids('cluster'): - relation_set(relation_id=rid, - relation_settings=peer_settings) + if peer_settings: + for rid in relation_ids('cluster'): + relation_set(relation_id=rid, + relation_settings=peer_settings) return ret finally: From 4a9602f1c117c5a582d709e966423f2e2bb192f5 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 13 Jan 2015 11:04:56 +0000 Subject: [PATCH 07/13] check syncs and retry if inconsistent --- hooks/keystone_hooks.py | 17 +++-- hooks/keystone_ssl.py | 54 ++++++++++++---- hooks/keystone_utils.py | 138 +++++++++++++++++++++++++++------------- 3 files changed, 147 insertions(+), 62 deletions(-) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 92ca76a1..c6214851 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -64,6 +64,7 @@ from keystone_utils import ( check_peer_actions, CA_CERT_PATH, ensure_permissions, + print_rel_debug, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -219,6 +220,8 @@ def pgsql_db_changed(): 
@hooks.hook('identity-service-relation-changed') @synchronize_ca_if_changed() def identity_changed(relation_id=None, remote_unit=None): + CONFIGS.write_all() + notifications = {} if is_elected_leader(CLUSTER_RES): # Catch database not configured error and defer until db ready @@ -236,8 +239,6 @@ def identity_changed(relation_id=None, remote_unit=None): log("Unexpected exception occurred", level=ERROR) raise - CONFIGS.write_all() - settings = relation_get(rid=relation_id, unit=remote_unit) service = settings.get('service', None) if service: @@ -332,6 +333,10 @@ def cluster_changed(): echo_whitelist.append('ssl-synced-units') + echo_settings = {k: v for k, v in relation_get().iteritems() + if k in echo_whitelist} + print_rel_debug(echo_settings, None, None, 'cluster_changed', '1') + # ssl cert sync must be done BEFORE this to reduce the risk of feedback # loops in cluster relation peer_echo(includes=echo_whitelist) @@ -390,8 +395,9 @@ def ha_joined(): @restart_on_change(restart_map()) @synchronize_ca_if_changed() def ha_changed(): - clustered = relation_get('clustered') CONFIGS.write_all() + + clustered = relation_get('clustered') if clustered and is_elected_leader(CLUSTER_RES): ensure_initial_admin(config) log('Cluster configured, notifying other services and updating ' @@ -442,14 +448,15 @@ def upgrade_charm(): group='keystone', peer_interface='cluster', ensure_local_user=True) + + CONFIGS.write_all() + if is_elected_leader(CLUSTER_RES): log('Cluster leader - ensuring endpoint configuration is up to ' 'date', level=DEBUG) time.sleep(10) update_all_identity_relation_units() - CONFIGS.write_all() - def main(): try: diff --git a/hooks/keystone_ssl.py b/hooks/keystone_ssl.py index 6b2f4a85..4f6fce11 100644 --- a/hooks/keystone_ssl.py +++ b/hooks/keystone_ssl.py @@ -5,6 +5,13 @@ import shutil import subprocess import tarfile import tempfile +import time + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) CA_EXPIRY = '365' ORG_NAME = 'Ubuntu' @@ 
-278,23 +285,42 @@ class JujuCA(object): crt = self._sign_csr(csr, service, common_name) cmd = ['chown', '-R', '%s.%s' % (self.user, self.group), self.ca_dir] subprocess.check_call(cmd) - print 'Signed new CSR, crt @ %s' % crt + log('Signed new CSR, crt @ %s' % crt, level=DEBUG) return crt, key def get_cert_and_key(self, common_name): - print 'Getting certificate and key for %s.' % common_name - key = os.path.join(self.ca_dir, 'certs', '%s.key' % common_name) - crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name) - if os.path.isfile(crt): - print 'Found existing certificate for %s.' % common_name - crt = open(crt, 'r').read() - try: - key = open(key, 'r').read() - except: - print 'Could not load ssl private key for %s from %s' %\ - (common_name, key) - exit(1) - return crt, key + log('Getting certificate and key for %s.' % common_name, level=DEBUG) + keypath = os.path.join(self.ca_dir, 'certs', '%s.key' % common_name) + crtpath = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name) + if os.path.isfile(crtpath): + log('Found existing certificate for %s.' 
% common_name, + level=DEBUG) + max_retries = 3 + while True: + mtime = os.path.getmtime(crtpath) + + crt = open(crtpath, 'r').read() + try: + key = open(keypath, 'r').read() + except: + msg = ('Could not load ssl private key for %s from %s' % + (common_name, keypath)) + raise Exception(msg) + + # Ensure we are not reading a file that is being written to + if mtime != os.path.getmtime(crtpath): + max_retries -= 1 + if max_retries == 0: + msg = ("crt contents changed during read - retry " + "failed") + raise Exception(msg) + + log("crt contents changed during read - re-reading", + level=WARNING) + time.sleep(1) + else: + return crt, key + crt, key = self._create_certificate(common_name, common_name) return open(crt, 'r').read(), open(key, 'r').read() diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index 8199d641..b62f0ddb 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -63,6 +63,7 @@ from charmhelpers.core.hookenv import ( DEBUG, INFO, WARNING, + ERROR, ) from charmhelpers.fetch import ( @@ -652,14 +653,14 @@ def check_peer_actions(): key = re.compile("^(.+)?\.(.+)?\.(.+)") res = re.search(key, flag) if res: - source = res. group(1) - service = res. group(2) - action = res. group(3) + source = res.group(1) + service = res.group(2) + action = res.group(3) else: key = re.compile("^(.+)?\.(.+)?") res = re.search(key, flag) - source = res. group(1) - action = res. group(2) + source = res.group(1) + action = res.group(2) # Don't execute actions requested byu this unit. 
if local_unit().replace('.', '-') != source: @@ -676,7 +677,7 @@ def check_peer_actions(): (action, service), level=DEBUG) service_stop(service) elif action == 'update-ca-certificates': - log("Running update-ca-certificates", level=DEBUG) + log("Running %s" % (action), level=DEBUG) subprocess.check_call(['update-ca-certificates']) else: log("Unknown action flag=%s" % (flag), level=WARNING) @@ -700,12 +701,13 @@ def create_peer_service_actions(action, services): perms=0o644) -def create_service_action(action): - action = "%s.%s" % (local_unit().replace('/', '-'), action) - flagfile = os.path.join(SYNC_FLAGS_DIR, action) - log("Creating action %s" % (flagfile), level=DEBUG) - write_file(flagfile, content='', owner=SSH_USER, group='keystone', - perms=0o644) +def create_peer_actions(actions): + for action in actions: + action = "%s.%s" % (local_unit().replace('/', '-'), action) + flagfile = os.path.join(SYNC_FLAGS_DIR, action) + log("Creating action %s" % (flagfile), level=DEBUG) + write_file(flagfile, content='', owner=SSH_USER, group='keystone', + perms=0o644) @retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError) @@ -756,22 +758,47 @@ def synchronize_ca(fatal=False): # We need to restart peer apache services to ensure they have picked up # new ssl keys. 
create_peer_service_actions('restart', ['apache2']) - create_service_action('update-ca-certificates') - try: - unison_sync(paths_to_sync) - except: - if fatal: - raise - else: - log("Sync failed but fatal=False", level=INFO) - return + create_peer_actions(['update-ca-certificates']) - trigger = str(uuid.uuid4()) - log("Sending restart-services-trigger=%s to all peers" % (trigger), + retries = 3 + while True: + hash1 = hashlib.sha256() + for path in paths_to_sync: + update_hash_from_path(hash1, path) + + try: + unison_sync(paths_to_sync) + except: + if fatal: + raise + else: + log("Sync failed but fatal=False", level=INFO) + return + + hash2 = hashlib.sha256() + for path in paths_to_sync: + update_hash_from_path(hash2, path) + + # Detect whether someone else has synced to this unit while we did our + # transfer. + if hash1.hexdigest() != hash2.hexdigest(): + retries -= 1 + if retries > 0: + log("SSL dir contents changed during sync - retrying unison " + "sync %s more times" % (retries), level=WARNING) + else: + log("SSL dir contents changed during sync - retries failed", + level=ERROR) + return {} + else: + break + + hash = hash1.hexdigest() + log("Sending restart-services-trigger=%s to all peers" % (hash), level=DEBUG) log("Sync complete", level=DEBUG) - return {'restart-services-trigger': trigger, + return {'restart-services-trigger': hash, 'ssl-synced-units': peer_units()} @@ -802,28 +829,31 @@ def synchronize_ca_if_changed(force=False, fatal=False): def inner_synchronize_ca_if_changed2(*args, **kwargs): # Only sync master can do sync. Ensure (a) we are not nested and # (b) a master is elected and we are it. 
+ acquired = SSL_SYNC_SEMAPHORE.acquire(blocking=0) try: - acquired = SSL_SYNC_SEMAPHORE.acquire(blocking=0) - if not acquired or not is_elected_leader(CLUSTER_RES): + if not acquired: + log("Nested sync - ignoring", level=DEBUG) + return f(*args, **kwargs) + + if not is_elected_leader(CLUSTER_RES): + log("Not leader - ignoring sync", level=DEBUG) return f(*args, **kwargs) peer_settings = {} - # Ensure we don't do a double sync if we are nested. if not force: - hash1 = hashlib.sha256() - for path in [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]: - update_hash_from_path(hash1, path) + ssl_dirs = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH] - hash1 = hash1.hexdigest() + hash1 = hashlib.sha256() + for path in ssl_dirs: + update_hash_from_path(hash1, path) ret = f(*args, **kwargs) hash2 = hashlib.sha256() - for path in [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]: + for path in ssl_dirs: update_hash_from_path(hash2, path) - hash2 = hash2.hexdigest() - if hash1 != hash2: + if hash1.hexdigest() != hash2.hexdigest(): log("SSL certs have changed - syncing peers", level=DEBUG) peer_settings = synchronize_ca(fatal=fatal) @@ -832,9 +862,8 @@ def synchronize_ca_if_changed(force=False, fatal=False): level=DEBUG) else: ret = f(*args, **kwargs) - if force: - log("Doing forced ssl cert sync", level=DEBUG) - peer_settings = synchronize_ca(fatal=fatal) + log("Doing forced ssl cert sync", level=DEBUG) + peer_settings = synchronize_ca(fatal=fatal) if peer_settings: for rid in relation_ids('cluster'): @@ -889,6 +918,22 @@ def relation_list(rid): return result +def print_rel_debug(relation_data, remote_unit, relation_id, tag, name): + debug_settings = relation_get(unit=local_unit(), rid=relation_id) + diff = {k: {'b': debug_settings[k], 'a': v} for k, v in + relation_data.iteritems() + if (k in debug_settings and + relation_data[k] != debug_settings.get(k))} + + unchanged = [k for k in debug_settings.iterkeys() + if k not in relation_data] + + log("[debug:%s:%s:%s:%s] diff=%s" % + (name, tag, 
remote_unit, relation_id, str(diff)), level=DEBUG) + log("[debug:%s:%s:%s:%s] unchanged=%s" % + (name, tag, remote_unit, relation_id, unchanged), level=DEBUG) + + def add_service_to_keystone(relation_id=None, remote_unit=None): import manager manager = manager.KeystoneManager(endpoint=get_local_endpoint(), @@ -900,7 +945,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): https_cns = [] if single.issubset(settings): # other end of relation advertised only one endpoint - if 'None' in [v for k, v in settings.iteritems()]: + if 'None' in settings.itervalues(): # Some backend services advertise no endpoint but require a # hook execution to update auth strategy. relation_data = {} @@ -928,6 +973,10 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): for role in get_requested_roles(settings): log("Creating requested role: %s" % role) create_role(role) + + print_rel_debug(relation_data, remote_unit, relation_id, "1", + 'add-svc-to-ks') + peer_store_and_set(relation_id=relation_id, **relation_data) return @@ -1003,7 +1052,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): if prefix: service_username = "%s%s" % (prefix, service_username) - if 'None' in [v for k, v in settings.iteritems()]: + if 'None' in settings.itervalues(): return if not service_username: @@ -1041,10 +1090,10 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): "service_password": service_password, "service_tenant": service_tenant, "service_tenant_id": manager.resolve_tenant_id(service_tenant), - "https_keystone": "False", - "ssl_cert": "", - "ssl_key": "", - "ca_cert": "" + "https_keystone": None, + "ssl_cert": None, + "ssl_key": None, + "ca_cert": None } # Check if https is enabled @@ -1070,6 +1119,9 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): ca_bundle = ca.get_ca_bundle() relation_data['ca_cert'] = b64encode(ca_bundle) relation_data['https_keystone'] = 'True' + + print_rel_debug(relation_data, remote_unit, 
relation_id, "2", + 'add-svc-to-ks') peer_store_and_set(relation_id=relation_id, **relation_data) From 2fa428e50be63ec0a04325bcba55b0fd953e95c9 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 13 Jan 2015 13:43:07 +0000 Subject: [PATCH 08/13] tests passing and cleanup --- hooks/keystone_hooks.py | 16 --- hooks/keystone_utils.py | 21 ---- unit_tests/test_keystone_hooks.py | 156 +++++++++++++++--------------- unit_tests/test_keystone_utils.py | 8 +- 4 files changed, 82 insertions(+), 119 deletions(-) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 3f4fcdf7..3397fd22 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -65,7 +65,6 @@ from keystone_utils import ( check_peer_actions, CA_CERT_PATH, ensure_permissions, - print_rel_debug, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -308,17 +307,6 @@ def identity_updates_with_forced_ssl_sync(): def cluster_changed(): check_peer_actions() - # Uncomment the following to print out all cluster relation settings in - # log (debug only). 
- """ - settings = relation_get() - rels = ["%s:%s" % (k, v) for k, v in settings.iteritems()] - tag = '\n[debug:%s]' % (remote_unit()) - log("PEER RELATION SETTINGS (unit=%s): %s" % (remote_unit(), - tag.join(rels)), - level=DEBUG) - """ - # NOTE(jamespage) re-echo passwords for peer storage echo_whitelist = ['_passwd', 'identity-service:'] unison.ssh_authorized_peers(user=SSH_USER, @@ -337,10 +325,6 @@ def cluster_changed(): echo_whitelist.append('ssl-synced-units') - echo_settings = {k: v for k, v in relation_get().iteritems() - if k in echo_whitelist} - print_rel_debug(echo_settings, None, None, 'cluster_changed', '1') - # ssl cert sync must be done BEFORE this to reduce the risk of feedback # loops in cluster relation peer_echo(includes=echo_whitelist) diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index 8b4a41b6..431653f8 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -929,22 +929,6 @@ def relation_list(rid): return result -def print_rel_debug(relation_data, remote_unit, relation_id, tag, name): - debug_settings = relation_get(unit=local_unit(), rid=relation_id) - diff = {k: {'b': debug_settings[k], 'a': v} for k, v in - relation_data.iteritems() - if (k in debug_settings and - relation_data[k] != debug_settings.get(k))} - - unchanged = [k for k in debug_settings.iterkeys() - if k not in relation_data] - - log("[debug:%s:%s:%s:%s] diff=%s" % - (name, tag, remote_unit, relation_id, str(diff)), level=DEBUG) - log("[debug:%s:%s:%s:%s] unchanged=%s" % - (name, tag, remote_unit, relation_id, unchanged), level=DEBUG) - - def add_service_to_keystone(relation_id=None, remote_unit=None): import manager manager = manager.KeystoneManager(endpoint=get_local_endpoint(), @@ -985,9 +969,6 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): log("Creating requested role: %s" % role) create_role(role) - print_rel_debug(relation_data, remote_unit, relation_id, "1", - 'add-svc-to-ks') - 
peer_store_and_set(relation_id=relation_id, **relation_data) return @@ -1131,8 +1112,6 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): relation_data['ca_cert'] = b64encode(ca_bundle) relation_data['https_keystone'] = 'True' - print_rel_debug(relation_data, remote_unit, relation_id, "2", - 'add-svc-to-ks') peer_store_and_set(relation_id=relation_id, **relation_data) diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index d7dc83f8..d3a1daef 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -31,7 +31,6 @@ TO_PATCH = [ 'local_unit', 'filter_installed_packages', 'relation_ids', - 'relation_list', 'relation_set', 'relation_get', 'related_units', @@ -162,10 +161,13 @@ class KeystoneRelationTests(CharmTestCase): 'is already associated a mysql one') @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') def test_db_changed_missing_relation_data(self, configs, mock_peer_units, + mock_is_elected_leader, mock_log): + mock_is_elected_leader.return_value = False mock_peer_units.return_value = None configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] @@ -174,8 +176,12 @@ class KeystoneRelationTests(CharmTestCase): 'shared-db relation incomplete. Peer not ready?' 
) + @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch.object(hooks, 'CONFIGS') - def test_postgresql_db_changed_missing_relation_data(self, configs): + def test_postgresql_db_changed_missing_relation_data(self, configs, + mock_is_leader, + mock_log): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] hooks.pgsql_db_changed() @@ -198,11 +204,13 @@ class KeystoneRelationTests(CharmTestCase): hooks.pgsql_db_changed() @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_db_changed_allowed(self, identity_changed, configs, - mock_peer_units, mock_log): + mock_peer_units, mock_is_elected_leader, + mock_log): mock_peer_units.return_value = None self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -217,11 +225,13 @@ class KeystoneRelationTests(CharmTestCase): remote_unit='unit/0') @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_db_changed_not_allowed(self, identity_changed, configs, - mock_peer_units, mock_log): + mock_peer_units, mock_is_elected_leader, + mock_log): mock_peer_units.return_value = None self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -234,11 +244,13 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(identity_changed.called) @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_postgresql_db_changed(self, identity_changed, configs, - mock_peer_units, mock_log): + mock_peer_units, mock_is_elected_leader, + mock_log): mock_peer_units.return_value = None 
self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -252,7 +264,9 @@ class KeystoneRelationTests(CharmTestCase): relation_id='identity-service:0', remote_unit='unit/0') - @patch('keystone_utils.is_sync_master') + @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') + @patch.object(hooks, 'peer_units') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -263,12 +277,15 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_no_openstack_upgrade_leader( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions, is_sync_master): - is_sync_master.return_value = False + ensure_permissions, mock_peer_units, mock_is_elected_leader, + mock_log): self.openstack_upgrade_available.return_value = False self.is_elected_leader.return_value = True + # avoid having to mock syncer + mock_is_elected_leader.return_value = False + mock_peer_units.return_value = [] self.relation_ids.return_value = ['identity-service:0'] - self.relation_list.return_value = ['unit/0'] + self.related_units.return_value = ['unit/0'] hooks.config_changed() ensure_user.assert_called_with(user=self.ssh_user, group='keystone') @@ -286,7 +303,8 @@ class KeystoneRelationTests(CharmTestCase): relation_id='identity-service:0', remote_unit='unit/0') - @patch('keystone_utils.is_sync_master') + @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -297,10 +315,11 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_no_openstack_upgrade_not_leader( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions, is_sync_master): - is_sync_master.return_value = False + ensure_permissions, 
mock_is_elected_leader, + mock_log): self.openstack_upgrade_available.return_value = False self.is_elected_leader.return_value = False + mock_is_elected_leader.return_value = False hooks.config_changed() ensure_user.assert_called_with(user=self.ssh_user, group='keystone') @@ -314,7 +333,9 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(self.ensure_initial_admin.called) self.assertFalse(identity_changed.called) - @patch('keystone_utils.is_sync_master') + @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') + @patch.object(hooks, 'peer_units') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -325,12 +346,15 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_with_openstack_upgrade( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions, is_sync_master): - is_sync_master.return_value = False + ensure_permissions, mock_peer_units, mock_is_elected_leader, + mock_log): self.openstack_upgrade_available.return_value = True self.is_elected_leader.return_value = True + # avoid having to mock syncer + mock_is_elected_leader.return_value = False + mock_peer_units.return_value = [] self.relation_ids.return_value = ['identity-service:0'] - self.relation_list.return_value = ['unit/0'] + self.related_units.return_value = ['unit/0'] hooks.config_changed() ensure_user.assert_called_with(user=self.ssh_user, group='keystone') @@ -351,42 +375,24 @@ class KeystoneRelationTests(CharmTestCase): remote_unit='unit/0') @patch('keystone_utils.log') - @patch('keystone_utils.peer_units') - @patch('keystone_utils.relation_ids') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.is_sync_master') - @patch('keystone_utils.update_hash_from_path') - @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'hashlib') @patch.object(hooks, 'send_notifications') def test_identity_changed_leader(self, 
mock_send_notifications, - mock_hashlib, mock_synchronize_ca, - mock_update_hash_from_path, - mock_is_sync_master, - mock_is_elected_leader, - mock_relation_ids, mock_peer_units, + mock_hashlib, mock_is_elected_leader, mock_log): - mock_peer_units.return_value = None - mock_relation_ids.return_value = [] - mock_is_sync_master.return_value = True mock_is_elected_leader.return_value = True - # Ensure always returns diff - mock_update_hash_from_path.side_effect = \ - lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) - - self.is_elected_leader.return_value = True hooks.identity_changed( relation_id='identity-service:0', remote_unit='unit/0') self.add_service_to_keystone.assert_called_with( 'identity-service:0', 'unit/0') - self.assertTrue(mock_synchronize_ca.called) @patch('keystone_utils.log') - @patch('keystone_utils.peer_units') - def test_identity_changed_no_leader(self, mock_peer_units, mock_log): - mock_peer_units.return_value = None + @patch('keystone_utils.is_elected_leader') + def test_identity_changed_no_leader(self, mock_is_elected_leader, + mock_log): self.is_elected_leader.return_value = False hooks.identity_changed( relation_id='identity-service:0', @@ -403,33 +409,23 @@ class KeystoneRelationTests(CharmTestCase): peer_interface='cluster', ensure_local_user=True) @patch('keystone_utils.log') - @patch('keystone_utils.relation_ids') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.is_sync_master') - @patch('keystone_utils.update_hash_from_path') @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'check_peer_actions') @patch.object(unison, 'ssh_authorized_peers') @patch.object(hooks, 'CONFIGS') def test_cluster_changed(self, configs, ssh_authorized_peers, - check_peer_actions, - mock_synchronize_ca, mock_update_hash_from_path, - mock_is_sync_master, mock_is_elected_leader, - mock_relation_ids, mock_log): - mock_relation_ids.return_value = [] - mock_is_sync_master.return_value = True - 
mock_is_elected_leader.return_value = True - # Ensure always returns diff - mock_update_hash_from_path.side_effect = \ - lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) - + check_peer_actions, mock_synchronize_ca, + mock_is_elected_leader, + mock_log): + mock_is_elected_leader.return_value = False hooks.cluster_changed() - whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master'] + whitelist = ['_passwd', 'identity-service:', 'ssl-synced-units'] self.peer_echo.assert_called_with(includes=whitelist) ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='keystone', peer_interface='cluster', ensure_local_user=True) - self.assertTrue(mock_synchronize_ca.called) + self.assertFalse(mock_synchronize_ca.called) self.assertTrue(configs.write_all.called) def test_ha_joined(self): @@ -480,31 +476,36 @@ class KeystoneRelationTests(CharmTestCase): self.relation_set.assert_called_with(**args) @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.peer_units') @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_not_clustered_not_leader(self, configs, mock_synchronize_ca, mock_peer_units, + mock_is_leader, mock_log): mock_peer_units.return_value = None + mock_is_leader.return_value = False self.relation_get.return_value = False self.is_elected_leader.return_value = False hooks.ha_changed() self.assertTrue(configs.write_all.called) - self.assertTrue(mock_synchronize_ca.called) + self.assertFalse(mock_synchronize_ca.called) @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.peer_units') - @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_clustered_leader(self, configs, identity_changed, - mock_synchronize_ca, - mock_peer_units, mock_log): + mock_peer_units, + mock_is_elected_leader, + mock_log): mock_peer_units.return_value = 
None + mock_is_elected_leader.return_value = False self.relation_get.return_value = True self.is_elected_leader.return_value = True self.relation_ids.return_value = ['identity-service:0'] @@ -513,18 +514,20 @@ class KeystoneRelationTests(CharmTestCase): hooks.ha_changed() self.assertTrue(configs.write_all.called) self.log.assert_called_with( - 'Cluster configured, notifying other services and updating ' - 'keystone endpoint configuration') + 'Firing identity_changed hook for all related services.') identity_changed.assert_called_with( relation_id='identity-service:0', remote_unit='unit/0') - self.assertTrue(mock_synchronize_ca.called) @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') - def test_configure_https_enable(self, configs, mock_peer_units, mock_log): + def test_configure_https_enable(self, configs, mock_peer_units, + mock_is_elected_leader, + mock_log): mock_peer_units.return_value = None + mock_is_elected_leader.return_value = False configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = ['https'] configs.write = MagicMock() @@ -535,10 +538,14 @@ class KeystoneRelationTests(CharmTestCase): self.check_call.assert_called_with(cmd) @patch('keystone_utils.log') + @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') - def test_configure_https_disable(self, configs, mock_peer_units, mock_log): + def test_configure_https_disable(self, configs, mock_peer_units, + mock_is_elected_leader, + mock_log): mock_peer_units.return_value = None + mock_is_elected_leader.return_value = False configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [''] configs.write = MagicMock() @@ -548,19 +555,19 @@ class KeystoneRelationTests(CharmTestCase): cmd = ['a2dissite', 'openstack_https_frontend'] self.check_call.assert_called_with(cmd) + @patch('keystone_utils.log') 
@patch('keystone_utils.relation_ids') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.is_sync_master') @patch('keystone_utils.update_hash_from_path') @patch('keystone_utils.synchronize_ca') @patch.object(unison, 'ssh_authorized_peers') def test_upgrade_charm_leader(self, ssh_authorized_peers, mock_synchronize_ca, mock_update_hash_from_path, - mock_is_sync_master, mock_is_elected_leader, - mock_relation_ids): + mock_is_elected_leader, + mock_relation_ids, + mock_log): mock_relation_ids.return_value = [] - mock_is_sync_master.return_value = True mock_is_elected_leader.return_value = True # Ensure always returns diff mock_update_hash_from_path.side_effect = \ @@ -575,25 +582,21 @@ class KeystoneRelationTests(CharmTestCase): peer_interface='cluster', ensure_local_user=True) self.assertTrue(mock_synchronize_ca.called) self.log.assert_called_with( - 'Cluster leader - ensuring endpoint configuration' - ' is up to date') + 'Firing identity_changed hook for all related services.') self.assertTrue(self.ensure_initial_admin.called) + @patch('keystone_utils.log') @patch('keystone_utils.relation_ids') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.is_sync_master') @patch('keystone_utils.update_hash_from_path') - @patch('keystone_utils.synchronize_ca') @patch.object(unison, 'ssh_authorized_peers') def test_upgrade_charm_not_leader(self, ssh_authorized_peers, - mock_synchronize_ca, mock_update_hash_from_path, - mock_is_sync_master, mock_is_elected_leader, - mock_relation_ids): + mock_relation_ids, + mock_log): mock_relation_ids.return_value = [] - mock_is_sync_master.return_value = True - mock_is_elected_leader.return_value = True + mock_is_elected_leader.return_value = False # Ensure always returns diff mock_update_hash_from_path.side_effect = \ lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) @@ -605,6 +608,5 @@ class KeystoneRelationTests(CharmTestCase): ssh_authorized_peers.assert_called_with( user=self.ssh_user, 
group='keystone', peer_interface='cluster', ensure_local_user=True) - self.assertTrue(mock_synchronize_ca.called) self.assertFalse(self.log.called) self.assertFalse(self.ensure_initial_admin.called) diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py index 53a874cd..b94f0f26 100644 --- a/unit_tests/test_keystone_utils.py +++ b/unit_tests/test_keystone_utils.py @@ -28,7 +28,6 @@ TO_PATCH = [ 'configure_installation_source', 'is_elected_leader', 'https', - 'is_clustered', 'peer_store_and_set', 'service_stop', 'service_start', @@ -200,7 +199,6 @@ class TestKeystoneUtils(CharmTestCase): self.resolve_address.return_value = '10.0.0.3' self.test_config.set('admin-port', 80) self.test_config.set('service-port', 81) - self.is_clustered.return_value = False self.https.return_value = False self.test_config.set('https-service-endpoints', 'False') self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/' @@ -233,9 +231,9 @@ class TestKeystoneUtils(CharmTestCase): 'auth_port': 80, 'service_username': 'keystone', 'service_password': 'password', 'service_tenant': 'tenant', - 'https_keystone': 'False', - 'ssl_cert': '', 'ssl_key': '', - 'ca_cert': '', 'auth_host': '10.0.0.3', + 'https_keystone': None, + 'ssl_cert': None, 'ssl_key': None, + 'ca_cert': None, 'auth_host': '10.0.0.3', 'service_host': '10.0.0.3', 'auth_protocol': 'http', 'service_protocol': 'http', 'service_tenant_id': 'tenant_id'} From 03b0fd66f89c69c244523ad88505ef6ca25af4d8 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 13 Jan 2015 16:01:25 +0000 Subject: [PATCH 09/13] add leader protection to cluster-changed hook --- hooks/keystone_hooks.py | 63 ++++++++++++++++++------------- hooks/keystone_utils.py | 4 +- unit_tests/test_keystone_hooks.py | 52 ++++++++++--------------- 3 files changed, 56 insertions(+), 63 deletions(-) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 3397fd22..eeea2923 100755 --- a/hooks/keystone_hooks.py +++ 
b/hooks/keystone_hooks.py @@ -18,7 +18,6 @@ from charmhelpers.core.hookenv import ( log, local_unit, DEBUG, - INFO, WARNING, ERROR, relation_get, @@ -171,6 +170,7 @@ def pgsql_db_joined(): def update_all_identity_relation_units(): + CONFIGS.write_all() try: migrate_database() except Exception as exc: @@ -184,6 +184,11 @@ def update_all_identity_relation_units(): identity_changed(relation_id=rid, remote_unit=unit) +@synchronize_ca_if_changed(force=True) +def update_all_identity_relation_units_force_sync(): + update_all_identity_relation_units() + + @hooks.hook('shared-db-relation-changed') @restart_on_change(restart_map()) @synchronize_ca_if_changed() @@ -269,36 +274,40 @@ def identity_changed(relation_id=None, remote_unit=None): @hooks.hook('cluster-relation-joined') -def cluster_joined(relation_id=None): +def cluster_joined(): unison.ssh_authorized_peers(user=SSH_USER, group='juju_keystone', peer_interface='cluster', ensure_local_user=True) + + settings = {} + for addr_type in ADDRESS_TYPES: address = get_address_in_network( config('os-{}-network'.format(addr_type)) ) if address: - relation_set( - relation_id=relation_id, - relation_settings={'{}-address'.format(addr_type): address} - ) + settings['{}-address'.format(addr_type)] = address if config('prefer-ipv6'): private_addr = get_ipv6_addr(exc_list=[config('vip')])[0] - relation_set(relation_id=relation_id, - relation_settings={'private-address': private_addr}) + settings['private-address'] = private_addr + + # This will be consumed by -changed for ssl sync + settings['ssl-sync-required-%s' % (remote_unit().replace('/', '-'))] = '1' + + relation_set(relation_settings=settings) -@synchronize_ca_if_changed(fatal=True) -def identity_updates_with_ssl_sync(): - CONFIGS.write_all() - update_all_identity_relation_units() +def get_new_peers(peers): + units = [] + key = re.compile("^ssl-sync-required-(.+)") + for peer in peers: + res = re.search(key, peer) + if res: + units.append(res.group(1)) - 
-@synchronize_ca_if_changed(force=True, fatal=True) -def identity_updates_with_forced_ssl_sync(): - identity_updates_with_ssl_sync() + return units @hooks.hook('cluster-relation-changed', @@ -314,19 +323,19 @@ def cluster_changed(): peer_interface='cluster', ensure_local_user=True) - synced_units = relation_get(attribute='ssl-synced-units', - unit=local_unit()) - if not synced_units or (remote_unit() not in synced_units): - log("Peer '%s' not in list of synced units (%s)" % - (remote_unit(), synced_units), level=INFO) - identity_updates_with_forced_ssl_sync() + if is_elected_leader(CLUSTER_RES): + new_peers = get_new_peers(peer_units()) + if new_peers: + log("New peers joined and need syncing - %s" % + (', '.join(new_peers)), level=DEBUG) + update_all_identity_relation_units_force_sync() + # Clear + relation_set(relation_settings={p: None for p in new_peers}) + else: + update_all_identity_relation_units() else: - identity_updates_with_ssl_sync() + CONFIGS.write_all() - echo_whitelist.append('ssl-synced-units') - - # ssl cert sync must be done BEFORE this to reduce the risk of feedback - # loops in cluster relation peer_echo(includes=echo_whitelist) diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index 431653f8..a1c8a416 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -19,7 +19,6 @@ from charmhelpers.contrib.hahelpers.cluster import( is_elected_leader, determine_api_port, https, - peer_units, ) from charmhelpers.contrib.openstack import context, templating @@ -809,8 +808,7 @@ def synchronize_ca(fatal=False): level=DEBUG) log("Sync complete", level=DEBUG) - return {'restart-services-trigger': hash, - 'ssl-synced-units': peer_units()} + return {'restart-services-trigger': hash} def update_hash_from_path(hash, path, recurse_depth=10): diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index d3a1daef..02b4232d 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ 
-34,7 +34,6 @@ TO_PATCH = [ 'relation_set', 'relation_get', 'related_units', - 'remote_unit', 'unit_get', 'peer_echo', # charmhelpers.core.host @@ -162,13 +161,11 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') - def test_db_changed_missing_relation_data(self, configs, mock_peer_units, + def test_db_changed_missing_relation_data(self, configs, mock_is_elected_leader, mock_log): mock_is_elected_leader.return_value = False - mock_peer_units.return_value = None configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] hooks.db_changed() @@ -205,13 +202,11 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_db_changed_allowed(self, identity_changed, configs, - mock_peer_units, mock_is_elected_leader, + mock_is_elected_leader, mock_log): - mock_peer_units.return_value = None self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -226,13 +221,10 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_db_changed_not_allowed(self, identity_changed, configs, - mock_peer_units, mock_is_elected_leader, - mock_log): - mock_peer_units.return_value = None + mock_is_elected_leader, mock_log): self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -245,13 +237,10 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') 
@patch.object(hooks, 'identity_changed') def test_postgresql_db_changed(self, identity_changed, configs, - mock_peer_units, mock_is_elected_leader, - mock_log): - mock_peer_units.return_value = None + mock_is_elected_leader, mock_log): self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -389,10 +378,12 @@ class KeystoneRelationTests(CharmTestCase): 'identity-service:0', 'unit/0') + @patch.object(hooks, 'remote_unit') @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') def test_identity_changed_no_leader(self, mock_is_elected_leader, - mock_log): + mock_log, mock_remote_unit): + mock_remote_unit.return_value = 'unit/0' self.is_elected_leader.return_value = False hooks.identity_changed( relation_id='identity-service:0', @@ -401,13 +392,19 @@ class KeystoneRelationTests(CharmTestCase): self.log.assert_called_with( 'Deferring identity_changed() to service leader.') + @patch.object(hooks, 'remote_unit') + @patch.object(hooks, 'peer_units') @patch.object(unison, 'ssh_authorized_peers') - def test_cluster_joined(self, ssh_authorized_peers): + def test_cluster_joined(self, ssh_authorized_peers, mock_peer_units, + mock_remote_unit): + mock_remote_unit.return_value = 'unit/0' + mock_peer_units.return_value = ['unit/0'] hooks.cluster_joined() ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='juju_keystone', peer_interface='cluster', ensure_local_user=True) + @patch.object(hooks, 'peer_units') @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') @patch('keystone_utils.synchronize_ca') @@ -417,10 +414,11 @@ class KeystoneRelationTests(CharmTestCase): def test_cluster_changed(self, configs, ssh_authorized_peers, check_peer_actions, mock_synchronize_ca, mock_is_elected_leader, - mock_log): + mock_log, mock_peer_units): + mock_peer_units.return_value = ['unit/0'] mock_is_elected_leader.return_value = False hooks.cluster_changed() - whitelist = ['_passwd', 
'identity-service:', 'ssl-synced-units'] + whitelist = ['_passwd', 'identity-service:'] self.peer_echo.assert_called_with(includes=whitelist) ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='keystone', @@ -477,15 +475,12 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.peer_units') @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_not_clustered_not_leader(self, configs, mock_synchronize_ca, - mock_peer_units, mock_is_leader, mock_log): - mock_peer_units.return_value = None mock_is_leader.return_value = False self.relation_get.return_value = False self.is_elected_leader.return_value = False @@ -496,15 +491,12 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.peer_units') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_clustered_leader(self, configs, identity_changed, - mock_peer_units, mock_is_elected_leader, mock_log): - mock_peer_units.return_value = None mock_is_elected_leader.return_value = False self.relation_get.return_value = True self.is_elected_leader.return_value = True @@ -521,12 +513,9 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') - @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') - def test_configure_https_enable(self, configs, mock_peer_units, - mock_is_elected_leader, + def test_configure_https_enable(self, configs, mock_is_elected_leader, mock_log): - mock_peer_units.return_value = None mock_is_elected_leader.return_value = False configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = ['https'] @@ -539,12 +528,9 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.is_elected_leader') 
- @patch('keystone_utils.peer_units') @patch.object(hooks, 'CONFIGS') - def test_configure_https_disable(self, configs, mock_peer_units, - mock_is_elected_leader, + def test_configure_https_disable(self, configs, mock_is_elected_leader, mock_log): - mock_peer_units.return_value = None mock_is_elected_leader.return_value = False configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [''] From c05f6a044764b432627bf2e70722457da00f52b5 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 13 Jan 2015 22:16:46 +0000 Subject: [PATCH 10/13] validate echoed peer data --- hooks/keystone_hooks.py | 52 ++++++++++++++++++++++++++++--- hooks/keystone_utils.py | 15 +++++---- unit_tests/test_keystone_hooks.py | 9 ++++-- unit_tests/test_keystone_utils.py | 6 ++-- 4 files changed, 66 insertions(+), 16 deletions(-) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index eeea2923..5b554e2d 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -88,6 +88,12 @@ from charmhelpers.contrib.openstack.context import ADDRESS_TYPES from charmhelpers.contrib.charmsupport import nrpe +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + PUBLIC, + ADMIN, +) + hooks = Hooks() CONFIGS = register_configs() @@ -310,14 +316,52 @@ def get_new_peers(peers): return units +def apply_echo_filters(settings, echo_whitelist): + """Filter settings to be peer_echo'ed. + + We may have received some data that we don't want to re-echo so filter + out unwanted keys and provide overrides. 
+ + Returns: + tuple(filtered list of keys to be echoed, overrides for keys omitted) + """ + filtered = [] + overrides = {} + for key in settings.iterkeys(): + for ekey in echo_whitelist: + if ekey in key: + if ekey == 'identity-service:': + auth_host = resolve_address(ADMIN) + service_host = resolve_address(PUBLIC) + if (key.endswith('auth_host') and + settings[key] != auth_host): + overrides[key] = auth_host + continue + elif (key.endswith('service_host') and + settings[key] != service_host): + overrides[key] = service_host + continue + + filtered.append(key) + + return filtered, overrides + + @hooks.hook('cluster-relation-changed', 'cluster-relation-departed') @restart_on_change(restart_map(), stopstart=True) def cluster_changed(): - check_peer_actions() - + settings = relation_get() # NOTE(jamespage) re-echo passwords for peer storage - echo_whitelist = ['_passwd', 'identity-service:'] + echo_whitelist, overrides = \ + apply_echo_filters(settings, ['_passwd', 'identity-service:']) + log("Peer echo overrides: %s" % (overrides), level=DEBUG) + relation_set(**overrides) + if echo_whitelist: + log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG) + peer_echo(includes=echo_whitelist) + + check_peer_actions() unison.ssh_authorized_peers(user=SSH_USER, group='keystone', peer_interface='cluster', @@ -336,8 +380,6 @@ def cluster_changed(): else: CONFIGS.write_all() - peer_echo(includes=echo_whitelist) - @hooks.hook('ha-relation-joined') def ha_joined(): diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index a1c8a416..ea947a2f 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -672,7 +672,7 @@ def check_peer_actions(): source = res.group(1) action = res.group(2) - # Don't execute actions requested byu this unit. + # Don't execute actions requested by this unit. 
if local_unit().replace('.', '-') != source: if action == 'restart': log("Running action='%s' on service '%s'" % @@ -692,7 +692,10 @@ def check_peer_actions(): else: log("Unknown action flag=%s" % (flag), level=WARNING) - os.remove(flagfile) + try: + os.remove(flagfile) + except: + pass def create_peer_service_actions(action, services): @@ -1080,10 +1083,10 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): "service_password": service_password, "service_tenant": service_tenant, "service_tenant_id": manager.resolve_tenant_id(service_tenant), - "https_keystone": None, - "ssl_cert": None, - "ssl_key": None, - "ca_cert": None + "https_keystone": "False", + "ssl_cert": "", + "ssl_key": "", + "ca_cert": "" } # Check if https is enabled diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index 02b4232d..4f971012 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -42,6 +42,8 @@ TO_PATCH = [ 'restart_on_change', # charmhelpers.contrib.openstack.utils 'configure_installation_source', + # charmhelpers.contrib.openstack.ip + 'resolve_address', # charmhelpers.contrib.hahelpers.cluster_utils 'is_elected_leader', 'get_hacluster_config', @@ -417,9 +419,12 @@ class KeystoneRelationTests(CharmTestCase): mock_log, mock_peer_units): mock_peer_units.return_value = ['unit/0'] mock_is_elected_leader.return_value = False + self.is_elected_leader.return_value = False + self.relation_get.return_value = {'foo_passwd': '123', + 'identity-service:16_foo': 'bar'} hooks.cluster_changed() - whitelist = ['_passwd', 'identity-service:'] - self.peer_echo.assert_called_with(includes=whitelist) + self.peer_echo.assert_called_with(includes=['foo_passwd', + 'identity-service:16_foo']) ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='keystone', peer_interface='cluster', ensure_local_user=True) diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py index b94f0f26..23608f0b 
100644 --- a/unit_tests/test_keystone_utils.py +++ b/unit_tests/test_keystone_utils.py @@ -231,9 +231,9 @@ class TestKeystoneUtils(CharmTestCase): 'auth_port': 80, 'service_username': 'keystone', 'service_password': 'password', 'service_tenant': 'tenant', - 'https_keystone': None, - 'ssl_cert': None, 'ssl_key': None, - 'ca_cert': None, 'auth_host': '10.0.0.3', + 'https_keystone': 'False', + 'ssl_cert': '', 'ssl_key': '', + 'ca_cert': '', 'auth_host': '10.0.0.3', 'service_host': '10.0.0.3', 'auth_protocol': 'http', 'service_protocol': 'http', 'service_tenant_id': 'tenant_id'} From 93d095375852f6614333a1a8fd3edaddba3cd4fd Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 16 Jan 2015 14:02:29 +0000 Subject: [PATCH 11/13] add sync-master stickiness back into the mix --- hooks/keystone_hooks.py | 30 +++----- hooks/keystone_utils.py | 93 ++++++++++++++++++++++++- unit_tests/test_keystone_hooks.py | 109 ++++++++++++++++-------------- 3 files changed, 161 insertions(+), 71 deletions(-) diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 5b554e2d..1dd92a10 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -24,7 +24,6 @@ from charmhelpers.core.hookenv import ( relation_ids, relation_set, related_units, - remote_unit, unit_get, ) @@ -64,6 +63,8 @@ from keystone_utils import ( check_peer_actions, CA_CERT_PATH, ensure_permissions, + get_ssl_sync_request_units, + clear_ssl_sync_requests, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -299,23 +300,12 @@ def cluster_joined(): private_addr = get_ipv6_addr(exc_list=[config('vip')])[0] settings['private-address'] = private_addr - # This will be consumed by -changed for ssl sync - settings['ssl-sync-required-%s' % (remote_unit().replace('/', '-'))] = '1' + # This will be consumed by cluster-relation-changed ssl master + settings['ssl-sync-required-%s' % (local_unit().replace('/', '-'))] = '1' relation_set(relation_settings=settings) -def get_new_peers(peers): - units = [] - 
key = re.compile("^ssl-sync-required-(.+)") - for peer in peers: - res = re.search(key, peer) - if res: - units.append(res.group(1)) - - return units - - def apply_echo_filters(settings, echo_whitelist): """Filter settings to be peer_echo'ed. @@ -354,7 +344,8 @@ def cluster_changed(): settings = relation_get() # NOTE(jamespage) re-echo passwords for peer storage echo_whitelist, overrides = \ - apply_echo_filters(settings, ['_passwd', 'identity-service:']) + apply_echo_filters(settings, ['_passwd', 'identity-service:', + 'ssl-cert-master']) log("Peer echo overrides: %s" % (overrides), level=DEBUG) relation_set(**overrides) if echo_whitelist: @@ -368,13 +359,12 @@ def cluster_changed(): ensure_local_user=True) if is_elected_leader(CLUSTER_RES): - new_peers = get_new_peers(peer_units()) - if new_peers: + units = get_ssl_sync_request_units(settings.keys()) + if units: log("New peers joined and need syncing - %s" % - (', '.join(new_peers)), level=DEBUG) + (', '.join(units)), level=DEBUG) update_all_identity_relation_units_force_sync() - # Clear - relation_set(relation_settings={p: None for p in new_peers}) + clear_ssl_sync_requests(units) else: update_all_identity_relation_units() else: diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index ea947a2f..bf08c5af 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -19,6 +19,8 @@ from charmhelpers.contrib.hahelpers.cluster import( is_elected_leader, determine_api_port, https, + peer_units, + oldest_peer, ) from charmhelpers.contrib.openstack import context, templating @@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import ( relation_get, relation_set, relation_ids, + related_units, DEBUG, INFO, WARNING, @@ -736,6 +739,82 @@ def unison_sync(paths_to_sync): fatal=True) +def get_ssl_sync_request_units(rkeys): + units = [] + key = re.compile("^ssl-sync-required-(.+)") + for rkey in rkeys: + res = re.search(key, rkey) + if res: + units.append(res.group(1)) + + return units + + +def 
clear_ssl_sync_requests(units): + settings = {} + for unit in units: + settings['ssl-sync-required-%s' % (unit)] = None + + relation_set(settings=settings) + + +def is_ssl_cert_master(): + """Return True if this unit is ssl cert master.""" + master = None + for rid in relation_ids('cluster'): + master = relation_get(attribute='ssl-cert-master', rid=rid, + unit=local_unit()) + + return master == local_unit() + + +def ensure_ssl_cert_master(use_oldest_peer=False): + """Ensure that an ssl cert master has been elected. + + Normally the cluster leader will take control but we allow for this to be + ignored since this could be called before the cluster is ready. + """ + if not peer_units(): + log("Not syncing certs since there are no peer units.", level=INFO) + return False + + if use_oldest_peer: + elect = oldest_peer(peer_units()) + else: + elect = is_elected_leader(CLUSTER_RES) + + if elect: + masters = [] + for rid in relation_ids('cluster'): + for unit in related_units(rid): + m = relation_get(rid=rid, unit=unit, + attribute='ssl-cert-master') + if m is not None: + masters.append(m) + + # We expect all peers to echo this setting + if not masters or 'unknown' in masters: + log("Notifying peers this unit is ssl-cert-master", level=INFO) + for rid in relation_ids('cluster'): + settings = {'ssl-cert-master': local_unit()} + relation_set(relation_id=rid, relation_settings=settings) + + # Return now and wait for cluster-relation-changed (peer_echo) for + # sync. + return False + elif len(set(masters)) != 1 and local_unit() not in masters: + log("Did not get concensus from peers on who is master (%s) - " + "waiting for current master to release before self-electing" % + (masters), level=INFO) + return False + + if not is_ssl_cert_master(): + log("Not ssl cert master - skipping sync", level=INFO) + return False + + return True + + def synchronize_ca(fatal=False): """Broadcast service credentials to peers. 
@@ -847,7 +926,7 @@ def synchronize_ca_if_changed(force=False, fatal=False): log("Nested sync - ignoring", level=DEBUG) return f(*args, **kwargs) - if not is_elected_leader(CLUSTER_RES): + if not ensure_ssl_cert_master(): log("Not leader - ignoring sync", level=DEBUG) return f(*args, **kwargs) @@ -877,6 +956,12 @@ def synchronize_ca_if_changed(force=False, fatal=False): log("Doing forced ssl cert sync", level=DEBUG) peer_settings = synchronize_ca(fatal=fatal) + # If we are the sync master but not leader, ensure we have + # relinquished master status. + if not is_elected_leader(CLUSTER_RES): + log("Re-electing ssl cert master.", level=INFO) + peer_settings['ssl-cert-master'] = 'unknown' + if peer_settings: for rid in relation_ids('cluster'): relation_set(relation_id=rid, @@ -913,6 +998,12 @@ def get_ca(user='keystone', group='keystone'): '%s' % SSL_DIR]) subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR]) + # Ensure a master has been elected and prefer this unit. Note that we + # prefer oldest peer as predicate since this action is normally only + # performed once at deploy time when the oldest peer should be the 
+ ensure_ssl_cert_master(use_oldest_peer=True) + ssl.CA_SINGLETON.append(ca) return ssl.CA_SINGLETON[0] diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index 4f971012..62182701 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -162,12 +162,12 @@ class KeystoneRelationTests(CharmTestCase): 'is already associated a mysql one') @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') def test_db_changed_missing_relation_data(self, configs, - mock_is_elected_leader, + mock_ensure_ssl_cert_master, mock_log): - mock_is_elected_leader.return_value = False + mock_ensure_ssl_cert_master.return_value = False configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] hooks.db_changed() @@ -176,11 +176,12 @@ class KeystoneRelationTests(CharmTestCase): ) @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') def test_postgresql_db_changed_missing_relation_data(self, configs, - mock_is_leader, + mock_ensure_leader, mock_log): + mock_ensure_leader.return_value = False configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] hooks.pgsql_db_changed() @@ -203,12 +204,13 @@ class KeystoneRelationTests(CharmTestCase): hooks.pgsql_db_changed() @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_db_changed_allowed(self, identity_changed, configs, - mock_is_elected_leader, + mock_ensure_ssl_cert_master, mock_log): + mock_ensure_ssl_cert_master.return_value = False self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -222,11 +224,12 @@ class 
KeystoneRelationTests(CharmTestCase): remote_unit='unit/0') @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_db_changed_not_allowed(self, identity_changed, configs, - mock_is_elected_leader, mock_log): + mock_ensure_ssl_cert_master, mock_log): + mock_ensure_ssl_cert_master.return_value = False self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -238,11 +241,12 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(identity_changed.called) @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_postgresql_db_changed(self, identity_changed, configs, - mock_is_elected_leader, mock_log): + mock_ensure_ssl_cert_master, mock_log): + mock_ensure_ssl_cert_master.return_value = False self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -256,7 +260,7 @@ class KeystoneRelationTests(CharmTestCase): remote_unit='unit/0') @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'peer_units') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @@ -268,12 +272,12 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_no_openstack_upgrade_leader( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions, mock_peer_units, mock_is_elected_leader, + ensure_permissions, mock_peer_units, mock_ensure_ssl_cert_master, mock_log): self.openstack_upgrade_available.return_value = False self.is_elected_leader.return_value = True # avoid having to mock syncer - mock_is_elected_leader.return_value = 
False + mock_ensure_ssl_cert_master.return_value = False mock_peer_units.return_value = [] self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -295,7 +299,7 @@ class KeystoneRelationTests(CharmTestCase): remote_unit='unit/0') @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -306,11 +310,11 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_no_openstack_upgrade_not_leader( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions, mock_is_elected_leader, + ensure_permissions, mock_ensure_ssl_cert_master, mock_log): self.openstack_upgrade_available.return_value = False self.is_elected_leader.return_value = False - mock_is_elected_leader.return_value = False + mock_ensure_ssl_cert_master.return_value = False hooks.config_changed() ensure_user.assert_called_with(user=self.ssh_user, group='keystone') @@ -325,7 +329,7 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(identity_changed.called) @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'peer_units') @patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'cluster_joined') @@ -337,12 +341,12 @@ class KeystoneRelationTests(CharmTestCase): def test_config_changed_with_openstack_upgrade( self, configure_https, identity_changed, configs, get_homedir, ensure_user, cluster_joined, - ensure_permissions, mock_peer_units, mock_is_elected_leader, + ensure_permissions, mock_peer_units, mock_ensure_ssl_cert_master, mock_log): self.openstack_upgrade_available.return_value = True self.is_elected_leader.return_value = True # avoid having to mock syncer - mock_is_elected_leader.return_value = False + 
mock_ensure_ssl_cert_master.return_value = False mock_peer_units.return_value = [] self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -366,13 +370,13 @@ class KeystoneRelationTests(CharmTestCase): remote_unit='unit/0') @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'hashlib') @patch.object(hooks, 'send_notifications') def test_identity_changed_leader(self, mock_send_notifications, - mock_hashlib, mock_is_elected_leader, + mock_hashlib, mock_ensure_ssl_cert_master, mock_log): - mock_is_elected_leader.return_value = True + mock_ensure_ssl_cert_master.return_value = False hooks.identity_changed( relation_id='identity-service:0', remote_unit='unit/0') @@ -380,12 +384,13 @@ class KeystoneRelationTests(CharmTestCase): 'identity-service:0', 'unit/0') - @patch.object(hooks, 'remote_unit') + @patch.object(hooks, 'local_unit') @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') - def test_identity_changed_no_leader(self, mock_is_elected_leader, - mock_log, mock_remote_unit): - mock_remote_unit.return_value = 'unit/0' + @patch('keystone_utils.ensure_ssl_cert_master') + def test_identity_changed_no_leader(self, mock_ensure_ssl_cert_master, + mock_log, mock_local_unit): + mock_ensure_ssl_cert_master.return_value = False + mock_local_unit.return_value = 'unit/0' self.is_elected_leader.return_value = False hooks.identity_changed( relation_id='identity-service:0', @@ -394,12 +399,12 @@ class KeystoneRelationTests(CharmTestCase): self.log.assert_called_with( 'Deferring identity_changed() to service leader.') - @patch.object(hooks, 'remote_unit') + @patch.object(hooks, 'local_unit') @patch.object(hooks, 'peer_units') @patch.object(unison, 'ssh_authorized_peers') def test_cluster_joined(self, ssh_authorized_peers, mock_peer_units, - mock_remote_unit): - mock_remote_unit.return_value = 'unit/0' + mock_local_unit): 
+ mock_local_unit.return_value = 'unit/0' mock_peer_units.return_value = ['unit/0'] hooks.cluster_joined() ssh_authorized_peers.assert_called_with( @@ -408,17 +413,17 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'peer_units') @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'check_peer_actions') @patch.object(unison, 'ssh_authorized_peers') @patch.object(hooks, 'CONFIGS') def test_cluster_changed(self, configs, ssh_authorized_peers, check_peer_actions, mock_synchronize_ca, - mock_is_elected_leader, + mock_ensure_ssl_cert_master, mock_log, mock_peer_units): mock_peer_units.return_value = ['unit/0'] - mock_is_elected_leader.return_value = False + mock_ensure_ssl_cert_master.return_value = False self.is_elected_leader.return_value = False self.relation_get.return_value = {'foo_passwd': '123', 'identity-service:16_foo': 'bar'} @@ -479,14 +484,14 @@ class KeystoneRelationTests(CharmTestCase): self.relation_set.assert_called_with(**args) @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_not_clustered_not_leader(self, configs, mock_synchronize_ca, - mock_is_leader, + mock_is_master, mock_log): - mock_is_leader.return_value = False + mock_is_master.return_value = False self.relation_get.return_value = False self.is_elected_leader.return_value = False @@ -495,14 +500,14 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(mock_synchronize_ca.called) @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_clustered_leader(self, configs, identity_changed, - 
mock_is_elected_leader, + mock_ensure_ssl_cert_master, mock_log): - mock_is_elected_leader.return_value = False + mock_ensure_ssl_cert_master.return_value = False self.relation_get.return_value = True self.is_elected_leader.return_value = True self.relation_ids.return_value = ['identity-service:0'] @@ -517,11 +522,11 @@ class KeystoneRelationTests(CharmTestCase): remote_unit='unit/0') @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') - def test_configure_https_enable(self, configs, mock_is_elected_leader, + def test_configure_https_enable(self, configs, mock_ensure_ssl_cert_master, mock_log): - mock_is_elected_leader.return_value = False + mock_ensure_ssl_cert_master.return_value = False configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = ['https'] configs.write = MagicMock() @@ -532,11 +537,12 @@ class KeystoneRelationTests(CharmTestCase): self.check_call.assert_called_with(cmd) @patch('keystone_utils.log') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') - def test_configure_https_disable(self, configs, mock_is_elected_leader, + def test_configure_https_disable(self, configs, + mock_ensure_ssl_cert_master, mock_log): - mock_is_elected_leader.return_value = False + mock_ensure_ssl_cert_master.return_value = False configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [''] configs.write = MagicMock() @@ -549,17 +555,20 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.relation_ids') @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.update_hash_from_path') @patch('keystone_utils.synchronize_ca') @patch.object(unison, 'ssh_authorized_peers') def test_upgrade_charm_leader(self, ssh_authorized_peers, mock_synchronize_ca, 
mock_update_hash_from_path, + mock_ensure_ssl_cert_master, mock_is_elected_leader, mock_relation_ids, mock_log): + mock_is_elected_leader.return_value = False mock_relation_ids.return_value = [] - mock_is_elected_leader.return_value = True + mock_ensure_ssl_cert_master.return_value = True # Ensure always returns diff mock_update_hash_from_path.side_effect = \ lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) @@ -578,16 +587,16 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.relation_ids') - @patch('keystone_utils.is_elected_leader') + @patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.update_hash_from_path') @patch.object(unison, 'ssh_authorized_peers') def test_upgrade_charm_not_leader(self, ssh_authorized_peers, mock_update_hash_from_path, - mock_is_elected_leader, + mock_ensure_ssl_cert_master, mock_relation_ids, mock_log): mock_relation_ids.return_value = [] - mock_is_elected_leader.return_value = False + mock_ensure_ssl_cert_master.return_value = False # Ensure always returns diff mock_update_hash_from_path.side_effect = \ lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) From b6def2a72c81736998fdc3fb321fbc0296e5a74c Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 20 Jan 2015 17:07:58 +0000 Subject: [PATCH 12/13] updated README --- README.md | 56 +++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 7e2935ff..a36659a4 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,30 @@ -This charm provides Keystone, the Openstack identity service. It's target -platform is Ubuntu Precise + Openstack Essex. This has not been tested -using Oneiric + Diablo. +Overview +======== + +This charm provides Keystone, the Openstack identity service. Its target +platform is (ideally) Ubuntu LTS + Openstack. 
+ +Usage +===== + +The following interfaces are provided: + + - nrpe-external-master: Used generate Nagios checks. -It provides three interfaces. - - identity-service: Openstack API endpoints request an entry in the Keystone service catalog + endpoint template catalog. When a relation is established, Keystone receives: service name, region, public_url, admin_url and internal_url. It first checks that the requested service is listed as a supported service. This list should stay updated to - support current Openstack core services. If the services is supported, - a entry in the service catalog is created, an endpoint template is + support current Openstack core services. If the service is supported, + an entry in the service catalog is created, an endpoint template is created and a admin token is generated. The other end of the relation - recieves the token as well as info on which ports Keystone is listening. + receives the token as well as info on which ports Keystone is listening + on. - keystone-service: This is currently only used by Horizon/dashboard as its interaction with Keystone is different from other Openstack API - servicies. That is, Horizon requests a Keystone role and token exists. + services. That is, Horizon requests a Keystone role and token exists. During a relation, Horizon requests its configured default role and Keystone responds with a token and the auth + admin ports on which Keystone is listening. @@ -26,9 +34,37 @@ It provides three interfaces. provision users, tenants, etc. or that otherwise automate using the Openstack cluster deployment. + - identity-notifications: Used to broadcast messages to any services + listening on the interface. + +Database +-------- + Keystone requires a database. By default, a local sqlite database is used. The charm supports relations to a shared-db via mysql-shared interface. 
When a new data store is configured, the charm ensures the minimum administrator credentials exist (as configured via charm configuration) -VIP is only required if you plan on multi-unit clusterming. The VIP becomes a highly-available API endpoint. +HA/Clustering +------------- + +VIP is only required if you plan on multi-unit clustering (requires relating +with hacluster charm). The VIP becomes a highly-available API endpoint. + +SSL/HTTPS +--------- + +This charm also supports SSL and HTTPS endpoints. In order to ensure SSL +certificates are only created once and distributed to all units, one unit gets +elected as an ssl-cert-master. One side-effect of this is that as units are +scaled-out the currently elected leader needs to be running in order for nodes +to sync certificates. This 'feature' is to work around the lack of native +leadership election via Juju itself, a feature that is due for release some +time soon but until then we have to rely on this. Also, if a keystone unit does +go down, it must be removed from Juju i.e. + + juju destroy-unit keystone/<unit-id> + +Otherwise it will be assumed that this unit may come back at some point and +therefore must be known to be in-sync with the rest before continuing. 
+ From 208fd9ea7e38238b22fa6901eb5aa0b9c344fefe Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 21 Jan 2015 16:23:15 +0000 Subject: [PATCH 13/13] ignore ssl actions if not enabled and improve support for non-ssl -> ssl --- hooks/keystone_context.py | 15 +++++----- hooks/keystone_hooks.py | 38 ++++++++++++++++++----- hooks/keystone_utils.py | 50 ++++++++++++++++++------------- unit_tests/test_keystone_hooks.py | 5 +++- 4 files changed, 73 insertions(+), 35 deletions(-) diff --git a/hooks/keystone_context.py b/hooks/keystone_context.py index 0d86c3db..5cedbc4a 100644 --- a/hooks/keystone_context.py +++ b/hooks/keystone_context.py @@ -9,7 +9,6 @@ from charmhelpers.contrib.openstack import context from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, - is_elected_leader, ) from charmhelpers.core.hookenv import ( @@ -38,8 +37,8 @@ class ApacheSSLContext(context.ApacheSSLContext): from keystone_utils import ( SSH_USER, get_ca, - CLUSTER_RES, ensure_permissions, + is_ssl_cert_master, ) ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) @@ -49,8 +48,9 @@ class ApacheSSLContext(context.ApacheSSLContext): ensure_permissions(ssl_dir, user=SSH_USER, group='keystone', perms=perms) - if not is_elected_leader(CLUSTER_RES): - log("Not leader - skipping apache cert config", level=INFO) + if not is_ssl_cert_master(): + log("Not ssl-cert-master - skipping apache cert config", + level=INFO) return log("Creating apache ssl certs in %s" % (ssl_dir), level=INFO) @@ -66,12 +66,13 @@ class ApacheSSLContext(context.ApacheSSLContext): from keystone_utils import ( SSH_USER, get_ca, - CLUSTER_RES, ensure_permissions, + is_ssl_cert_master, ) - if not is_elected_leader(CLUSTER_RES): - log("Not leader - skipping apache cert config", level=INFO) + if not is_ssl_cert_master(): + log("Not ssl-cert-master - skipping apache cert config", + level=INFO) return ca = get_ca(user=SSH_USER) diff --git a/hooks/keystone_hooks.py 
b/hooks/keystone_hooks.py index d27ca523..6c6096e6 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -1,5 +1,6 @@ #!/usr/bin/python import hashlib +import json import os import re import stat @@ -64,7 +65,8 @@ from keystone_utils import ( CA_CERT_PATH, ensure_permissions, get_ssl_sync_request_units, - clear_ssl_sync_requests, + is_str_true, + is_ssl_cert_master, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -147,6 +149,13 @@ def config_changed(): for rid in relation_ids('identity-admin'): admin_relation_changed(rid) + # Ensure sync request is sent out (needed for upgrade to ssl from non-ssl) + settings = {} + append_ssl_sync_request(settings) + if settings: + for rid in relation_ids('cluster'): + relation_set(relation_id=rid, relation_settings=settings) + @hooks.hook('shared-db-relation-joined') def db_joined(): @@ -281,6 +290,17 @@ def identity_changed(relation_id=None, remote_unit=None): send_notifications(notifications) +def append_ssl_sync_request(settings): + """Add request to be synced to relation settings. + + This will be consumed by cluster-relation-changed ssl master. 
+ """ + if (is_str_true(config('use-https')) or + is_str_true(config('https-service-endpoints'))): + unit = local_unit().replace('/', '-') + settings['ssl-sync-required-%s' % (unit)] = '1' + + @hooks.hook('cluster-relation-joined') def cluster_joined(): unison.ssh_authorized_peers(user=SSH_USER, @@ -301,8 +321,7 @@ def cluster_joined(): private_addr = get_ipv6_addr(exc_list=[config('vip')])[0] settings['private-address'] = private_addr - # This will be consumed by cluster-relation-changed ssl master - settings['ssl-sync-required-%s' % (local_unit().replace('/', '-'))] = '1' + append_ssl_sync_request(settings) relation_set(relation_settings=settings) @@ -359,13 +378,18 @@ def cluster_changed(): peer_interface='cluster', ensure_local_user=True) - if is_elected_leader(CLUSTER_RES): - units = get_ssl_sync_request_units(settings.keys()) - if units: + if is_elected_leader(CLUSTER_RES) or is_ssl_cert_master(): + units = get_ssl_sync_request_units() + synced_units = relation_get(attribute='ssl-synced-units', + unit=local_unit()) + if synced_units: + synced_units = json.loads(synced_units) + diff = set(units).symmetric_difference(set(synced_units)) + + if units and (not synced_units or diff): log("New peers joined and need syncing - %s" % (', '.join(units)), level=DEBUG) update_all_identity_relation_units_force_sync() - clear_ssl_sync_requests(units) else: update_all_identity_relation_units() diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index a17defdc..79fb389c 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -2,6 +2,7 @@ import glob import grp import hashlib +import json import os import pwd import re @@ -220,7 +221,7 @@ valid_services = { } -def str_is_true(value): +def is_str_true(value): if value and value.lower() in ['true', 'yes']: return True @@ -766,25 +767,24 @@ def unison_sync(paths_to_sync): fatal=True) -def get_ssl_sync_request_units(rkeys): +def get_ssl_sync_request_units(): + """Get list of units that have requested to be 
synced. + + NOTE: this must be called from cluster relation context. + """ units = [] - key = re.compile("^ssl-sync-required-(.+)") - for rkey in rkeys: - res = re.search(key, rkey) - if res: - units.append(res.group(1)) + for unit in related_units(): + settings = relation_get(unit=unit) or {} + rkeys = settings.keys() + key = re.compile("^ssl-sync-required-(.+)") + for rkey in rkeys: + res = re.search(key, rkey) + if res: + units.append(res.group(1)) return units -def clear_ssl_sync_requests(units): - settings = {} - for unit in units: - settings['ssl-sync-required-%s' % (unit)] = None - - relation_set(settings=settings) - - def is_ssl_cert_master(): """Return True if this unit is ssl cert master.""" master = None @@ -801,6 +801,12 @@ def ensure_ssl_cert_master(use_oldest_peer=False): Normally the cluster leader will take control but we allow for this to be ignored since this could be called before the cluster is ready. """ + # Don't do anything if we are not in ssl/https mode + if not (is_str_true(config('use-https')) or + is_str_true(config('https-service-endpoints'))): + log("SSL/HTTPS is NOT enabled", level=DEBUG) + return False + if not peer_units(): log("Not syncing certs since there are no peer units.", level=INFO) return False @@ -855,13 +861,13 @@ def synchronize_ca(fatal=False): """ paths_to_sync = [SYNC_FLAGS_DIR] - if str_is_true(config('https-service-endpoints')): + if is_str_true(config('https-service-endpoints')): log("Syncing all endpoint certs since https-service-endpoints=True", level=DEBUG) paths_to_sync.append(SSL_DIR) paths_to_sync.append(APACHE_SSL_DIR) paths_to_sync.append(CA_CERT_PATH) - elif str_is_true(config('use-https')): + elif is_str_true(config('use-https')): log("Syncing keystone-endpoint certs since use-https=True", level=DEBUG) paths_to_sync.append(APACHE_SSL_DIR) @@ -879,6 +885,9 @@ def synchronize_ca(fatal=False): create_peer_service_actions('restart', ['apache2']) create_peer_actions(['update-ca-certificates']) + # Format here 
needs to match that used when peers request sync + synced_units = [unit.replace('/', '-') for unit in peer_units()] + retries = 3 while True: hash1 = hashlib.sha256() @@ -917,7 +926,8 @@ def synchronize_ca(fatal=False): level=DEBUG) log("Sync complete", level=DEBUG) - return {'restart-services-trigger': hash} + return {'restart-services-trigger': hash, + 'ssl-synced-units': json.dumps(synced_units)} def update_hash_from_path(hash, path, recurse_depth=10): @@ -1075,7 +1085,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): relation_data["auth_port"] = config('admin-port') relation_data["service_port"] = config('service-port') relation_data["region"] = config('region') - if str_is_true(config('https-service-endpoints')): + if is_str_true(config('https-service-endpoints')): # Pass CA cert as client will need it to # verify https connections ca = get_ca(user=SSH_USER) @@ -1215,7 +1225,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): relation_data["auth_protocol"] = "http" relation_data["service_protocol"] = "http" # generate or get a new cert/key for service if set to manage certs. 
- if str_is_true(config('https-service-endpoints')): + if is_str_true(config('https-service-endpoints')): ca = get_ca(user=SSH_USER) # NOTE(jamespage) may have multiple cns to deal with to iterate https_cns = set(https_cns) diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index 8b44e56c..cba69d48 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -416,6 +416,7 @@ class KeystoneRelationTests(CharmTestCase): user=self.ssh_user, group='juju_keystone', peer_interface='cluster', ensure_local_user=True) + @patch.object(hooks, 'is_ssl_cert_master') @patch.object(hooks, 'peer_units') @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @@ -426,7 +427,9 @@ class KeystoneRelationTests(CharmTestCase): def test_cluster_changed(self, configs, ssh_authorized_peers, check_peer_actions, mock_synchronize_ca, mock_ensure_ssl_cert_master, - mock_log, mock_peer_units): + mock_log, mock_peer_units, + mock_is_ssl_cert_master): + mock_is_ssl_cert_master.return_value = False mock_peer_units.return_value = ['unit/0'] mock_ensure_ssl_cert_master.return_value = False self.is_elected_leader.return_value = False