[hopem,r=]

Fixes ssl cert synchronisation across peers

Closes-Bug: 1317782
Edward Hope-Morley 2015-01-05 17:49:38 +00:00
parent 9e0062328a
commit 14f39ff133
7 changed files with 440 additions and 110 deletions

@ -228,7 +228,12 @@ def collect_authed_hosts(peer_interface):
return hosts
def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None):
def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None,
fatal=False):
"""Sync path to an specific peer host
Propagates exception if operation fails and fatal=True.
"""
cmd = cmd or copy(BASE_CMD)
if not verbose:
cmd.append('-silent')
@ -245,20 +250,30 @@ def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None):
run_as_user(user, cmd, gid)
except:
log('Error syncing remote files')
if fatal:
raise
def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None):
'''Sync paths to a specific host'''
def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None,
fatal=False):
"""Sync paths to an specific peer host
Propagates exception if any operation fails and fatal=True.
"""
if paths:
for p in paths:
sync_path_to_host(p, host, user, verbose, cmd, gid)
sync_path_to_host(p, host, user, verbose, cmd, gid, fatal)
def sync_to_peers(peer_interface, user, paths=None,
verbose=False, cmd=None, gid=None):
'''Sync paths to all peer hosts'''
'''The group is passed as an integer gid, allowing the user to '''
'''operate on directories whose group id differs from the user's own.'''
def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None,
gid=None, fatal=False):
"""Sync all hosts to an specific path
The type of group is integer, it allows user has permissions to
operate a directory have a different group id with the user id.
Propagates exception if any operation fails and fatal=True.
"""
if paths:
for host in collect_authed_hosts(peer_interface):
sync_to_peer(host, user, paths, verbose, cmd, gid)
sync_to_peer(host, user, paths, verbose, cmd, gid, fatal)

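The fatal flag threads down from sync_to_peers through sync_to_peer into
sync_path_to_host, so callers can now surface sync failures instead of having
them logged and swallowed. A minimal sketch of how a caller might combine
fatal=True with retries, since peers may not be ready yet (sync_with_retry is
a hypothetical helper; the charm itself gets the same effect from the
retry_on_exception decorator on unison_sync further down):

import time

import charmhelpers.contrib.unison as unison

def sync_with_retry(paths, attempts=3, delay=2):
    # fatal=True makes unison failures raise instead of only logging,
    # so we can back off and retry while peers come up.
    for i in range(attempts):
        try:
            unison.sync_to_peers(peer_interface='cluster',
                                 user='juju_keystone', paths=paths,
                                 verbose=True, fatal=True)
            return
        except Exception:
            if i == attempts - 1:
                raise
            time.sleep(delay)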
@ -1,3 +1,5 @@
import os
from charmhelpers.core.hookenv import config
from charmhelpers.core.host import mkdir, write_file
@ -9,9 +11,12 @@ from charmhelpers.contrib.hahelpers.cluster import (
determine_api_port
)
from charmhelpers.contrib.hahelpers.apache import install_ca_cert
from charmhelpers.core.hookenv import (
log,
INFO,
)
import os
from charmhelpers.contrib.hahelpers.apache import install_ca_cert
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@ -29,20 +34,52 @@ class ApacheSSLContext(context.ApacheSSLContext):
return super(ApacheSSLContext, self).__call__()
def configure_cert(self, cn):
from keystone_utils import SSH_USER, get_ca
from keystone_utils import (
SSH_USER,
get_ca,
is_ssl_cert_master,
ensure_permissions,
)
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
mkdir(path=ssl_dir)
perms = 0o755
mkdir(path=ssl_dir, owner=SSH_USER, group='keystone', perms=perms)
# Ensure accessible by keystone ssh user and group (for sync)
ensure_permissions(ssl_dir, user=SSH_USER, group='keystone',
perms=perms)
if not is_ssl_cert_master():
log("Not leader or cert master so skipping apache cert config",
level=INFO)
return
log("Creating apache ssl certs in %s" % (ssl_dir), level=INFO)
ca = get_ca(user=SSH_USER)
cert, key = ca.get_cert_and_key(common_name=cn)
write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)),
content=cert)
content=cert, owner=SSH_USER, group='keystone', perms=0o644)
write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)),
content=key)
content=key, owner=SSH_USER, group='keystone', perms=0o644)
def configure_ca(self):
from keystone_utils import SSH_USER, get_ca
from keystone_utils import (
SSH_USER,
get_ca,
is_ssl_cert_master,
ensure_permissions,
)
if not is_ssl_cert_master():
log("Not leader or cert master so skipping apache ca config",
level=INFO)
return
ca = get_ca(user=SSH_USER)
install_ca_cert(ca.get_ca_bundle())
# Ensure accessible by keystone ssh user and group (unison)
ensure_permissions(CA_CERT_PATH, user=SSH_USER, group='keystone',
perms=0o0644)
def canonical_names(self):
addresses = self.get_network_addresses()

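Note the write_file calls above now pass explicit owner, group and perms:
previously the cert and key were written with charmhelpers' defaults
(root:root, mode 0444), which the sync user cannot read. A minimal sketch of
the effect, assuming charmhelpers is installed and that the charm's SSH sync
user is 'juju_keystone':

from charmhelpers.core.host import write_file

# Written readable by the sync user and keystone group so that the
# peer sync (unison over ssh) can pick the files up.
write_file(path='/etc/apache2/ssl/keystone/cert_example.com',
           content='<pem data>', owner='juju_keystone',
           group='keystone', perms=0o644)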
@ -1,7 +1,8 @@
#!/usr/bin/python
import hashlib
import os
import re
import stat
import sys
import time
@ -16,6 +17,7 @@ from charmhelpers.core.hookenv import (
is_relation_made,
log,
local_unit,
WARNING,
ERROR,
relation_get,
relation_ids,
@ -57,11 +59,13 @@ from keystone_utils import (
STORED_PASSWD,
setup_ipv6,
send_notifications,
check_peer_actions,
CA_CERT_PATH,
ensure_permissions,
)
from charmhelpers.contrib.hahelpers.cluster import (
eligible_leader,
is_leader,
is_elected_leader,
get_hacluster_config,
)
@ -109,10 +113,19 @@ def config_changed():
check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
# Ensure unison can write to certs dir.
# FIXME: we need a better way around this e.g. move the cert to its own dir
# and give that unison permissions.
path = os.path.dirname(CA_CERT_PATH)
perms = int(oct(stat.S_IMODE(os.stat(path).st_mode) |
(stat.S_IWGRP | stat.S_IXGRP)), base=8)
ensure_permissions(path, group='keystone', perms=perms)
save_script_rc()
configure_https()
CONFIGS.write_all()
if eligible_leader(CLUSTER_RES):
if is_elected_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
log('Firing identity_changed hook for all related services.')
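An aside on the perms computation in config_changed above: the
int(oct(...), base=8) round-trip is a no-op, since it converts the mode to
octal text and straight back. The intent is simply to OR group write and
execute into the existing mode bits. A minimal equivalent sketch
(grant_group_wx is a hypothetical name; the charm pairs this with a chown
via ensure_permissions):

import os
import stat

def grant_group_wx(path):
    # Take the current permission bits and add group write + execute.
    mode = stat.S_IMODE(os.stat(path).st_mode)
    os.chmod(path, mode | stat.S_IWGRP | stat.S_IXGRP)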
@ -121,7 +134,9 @@ def config_changed():
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit)
remote_unit=unit, sync_certs=False)
synchronize_ca()
[cluster_joined(rid) for rid in relation_ids('cluster')]
@ -163,7 +178,7 @@ def db_changed():
log('shared-db relation incomplete. Peer not ready?')
else:
CONFIGS.write(KEYSTONE_CONF)
if eligible_leader(CLUSTER_RES):
if is_elected_leader(CLUSTER_RES):
# Bugs 1353135 & 1187508. Dbs can appear to be ready before the
# units acl entry has been added. So, if the db supports passing
# a list of permitted units then check if we're in the list.
@ -188,7 +203,7 @@ def pgsql_db_changed():
log('pgsql-db relation incomplete. Peer not ready?')
else:
CONFIGS.write(KEYSTONE_CONF)
if eligible_leader(CLUSTER_RES):
if is_elected_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
# Ensure any existing service entries are updated in the
@ -199,11 +214,27 @@ def pgsql_db_changed():
@hooks.hook('identity-service-relation-changed')
def identity_changed(relation_id=None, remote_unit=None):
def identity_changed(relation_id=None, remote_unit=None, sync_certs=True):
notifications = {}
if eligible_leader(CLUSTER_RES):
add_service_to_keystone(relation_id, remote_unit)
synchronize_ca()
if is_elected_leader(CLUSTER_RES):
# Catch database not configured error and defer until db ready
from keystoneclient.apiclient.exceptions import InternalServerError
try:
add_service_to_keystone(relation_id, remote_unit)
except InternalServerError as exc:
key = re.compile(r"'keystone\..+' doesn't exist")
if re.search(key, exc.message):
log("Keystone database not yet ready (InternalServerError "
"raised) - deferring until *-db relation completes.",
level=WARNING)
return
log("Unexpected exception occurred", level=ERROR)
raise
CONFIGS.write_all()
if sync_certs:
synchronize_ca()
settings = relation_get(rid=relation_id, unit=remote_unit)
service = settings.get('service', None)
@ -257,18 +288,22 @@ def cluster_joined(relation_id=None):
'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
check_peer_actions()
# NOTE(jamespage) re-echo passwords for peer storage
peer_echo(includes=['_passwd', 'identity-service:'])
echo_whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master']
peer_echo(includes=echo_whitelist)
unison.ssh_authorized_peers(user=SSH_USER,
group='keystone',
peer_interface='cluster',
ensure_local_user=True)
synchronize_ca()
CONFIGS.write_all()
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit)
identity_changed(relation_id=r_id, remote_unit=unit,
sync_certs=False)
synchronize_ca()
@hooks.hook('ha-relation-joined')
@ -325,14 +360,16 @@ def ha_joined():
def ha_changed():
clustered = relation_get('clustered')
CONFIGS.write_all()
if (clustered is not None and
is_leader(CLUSTER_RES)):
if clustered is not None and is_elected_leader(CLUSTER_RES):
ensure_initial_admin(config)
log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
identity_changed(relation_id=rid, remote_unit=unit)
identity_changed(relation_id=rid, remote_unit=unit,
sync_certs=False)
synchronize_ca()
@hooks.hook('identity-admin-relation-changed')
@ -375,8 +412,7 @@ def upgrade_charm():
group='keystone',
peer_interface='cluster',
ensure_local_user=True)
synchronize_ca()
if eligible_leader(CLUSTER_RES):
if is_elected_leader(CLUSTER_RES):
log('Cluster leader - ensuring endpoint configuration'
' is up to date')
time.sleep(10)
@ -385,8 +421,9 @@ def upgrade_charm():
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit)
remote_unit=unit, sync_certs=False)
CONFIGS.write_all()
synchronize_ca()
def main():

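The same pattern now appears in config_changed, cluster_changed, ha_changed
and upgrade_charm: fire identity_changed for every related unit with
sync_certs=False, then run synchronize_ca once at the end, instead of once
per unit. A sketch of the pattern factored out (a hypothetical helper; the
callables stand in for the hook-module functions of the same names):

def notify_units_then_sync(relation_ids, relation_list, identity_changed,
                           synchronize_ca):
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # Defer cert sync so it happens once, not once per unit.
            identity_changed(relation_id=r_id, remote_unit=unit,
                             sync_certs=False)
    synchronize_ca()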
@ -101,6 +101,9 @@ keyUsage = digitalSignature, keyEncipherment, keyAgreement
extendedKeyUsage = serverAuth, clientAuth
"""
# An instance can be appended to this list to represent a singleton
CA_SINGLETON = []
def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
print 'Ensuring certificate authority exists at %s.' % ca_dir

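CA_SINGLETON replaces the CA list previously kept in keystone_utils: a
module-level list is a simple Python idiom for a lazily created singleton,
and get_ca (further down) appends the first JujuCA instance and returns it
on every later call. A self-contained sketch of the idiom (names
hypothetical):

CA_SINGLETON = []

def get_instance(factory):
    # First caller creates and caches the instance; later callers reuse it.
    if not CA_SINGLETON:
        CA_SINGLETON.append(factory())
    return CA_SINGLETON[0]

assert get_instance(dict) is get_instance(dict)  # same object both times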
@ -1,8 +1,12 @@
#!/usr/bin/python
import glob
import grp
import subprocess
import os
import pwd
import uuid
import urlparse
import shutil
import time
from base64 import b64encode
@ -10,11 +14,11 @@ from collections import OrderedDict
from copy import deepcopy
from charmhelpers.contrib.hahelpers.cluster import(
eligible_leader,
is_elected_leader,
determine_api_port,
https,
is_clustered,
is_elected_leader,
peer_units,
)
from charmhelpers.contrib.openstack import context, templating
@ -37,8 +41,17 @@ from charmhelpers.contrib.openstack.utils import (
os_release,
save_script_rc as _save_script_rc)
from charmhelpers.core.host import (
mkdir,
write_file,
)
import charmhelpers.contrib.unison as unison
from charmhelpers.core.decorators import (
retry_on_exception,
)
from charmhelpers.core.hookenv import (
config,
log,
@ -46,8 +59,10 @@ from charmhelpers.core.hookenv import (
relation_get,
relation_set,
relation_ids,
unit_get,
DEBUG,
INFO,
WARNING,
)
from charmhelpers.fetch import (
@ -60,6 +75,7 @@ from charmhelpers.fetch import (
from charmhelpers.core.host import (
service_stop,
service_start,
service_restart,
pwgen,
lsb_release
)
@ -108,6 +124,8 @@ HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'
APACHE_SSL_DIR = '/etc/apache2/ssl/keystone'
SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/'
SSL_DIR = '/var/lib/keystone/juju_ssl/'
SSL_CA_NAME = 'Ubuntu Cloud'
CLUSTER_RES = 'grp_ks_vips'
@ -197,6 +215,13 @@ valid_services = {
}
def str_is_true(value):
if value and value.lower() in ['true', 'yes']:
return True
return False
def resource_map():
'''
Dynamically generate a map of resources that will be managed for a single
@ -272,7 +297,7 @@ def do_openstack_upgrade(configs):
configs.set_release(openstack_release=new_os_rel)
configs.write_all()
if eligible_leader(CLUSTER_RES):
if is_elected_leader(CLUSTER_RES):
migrate_database()
@ -474,44 +499,57 @@ def grant_role(user, role, tenant):
def ensure_initial_admin(config):
""" Ensures the minimum admin stuff exists in whatever database we're
# Allow retry on fail since leader may not be ready yet.
# NOTE(hopem): ks client may not be installed at module import time so we
# use this wrapped approach instead.
from keystoneclient.apiclient.exceptions import InternalServerError
@retry_on_exception(3, base_delay=3, exc_type=InternalServerError)
def _ensure_initial_admin(config):
"""Ensures the minimum admin stuff exists in whatever database we're
using.
This and the helper functions it calls are meant to be idempotent and
run during install as well as during db-changed. This will maintain
the admin tenant, user, role, service entry and endpoint across every
datastore we might use.
TODO: Possibly migrate data from one backend to another after it
changes?
"""
create_tenant("admin")
create_tenant(config("service-tenant"))
"""
create_tenant("admin")
create_tenant(config("service-tenant"))
passwd = ""
if config("admin-password") != "None":
passwd = config("admin-password")
elif os.path.isfile(STORED_PASSWD):
log("Loading stored passwd from %s" % STORED_PASSWD)
passwd = open(STORED_PASSWD, 'r').readline().strip('\n')
if passwd == "":
log("Generating new passwd for user: %s" %
config("admin-user"))
cmd = ['pwgen', '-c', '16', '1']
passwd = str(subprocess.check_output(cmd)).strip()
open(STORED_PASSWD, 'w+').writelines("%s\n" % passwd)
# User is managed by ldap backend when using ldap identity
if not (config('identity-backend') == 'ldap' and config('ldap-readonly')):
create_user(config('admin-user'), passwd, tenant='admin')
update_user_password(config('admin-user'), passwd)
create_role(config('admin-role'), config('admin-user'), 'admin')
create_service_entry("keystone", "identity", "Keystone Identity Service")
passwd = ""
if config("admin-password") != "None":
passwd = config("admin-password")
elif os.path.isfile(STORED_PASSWD):
log("Loading stored passwd from %s" % STORED_PASSWD)
passwd = open(STORED_PASSWD, 'r').readline().strip('\n')
if passwd == "":
log("Generating new passwd for user: %s" %
config("admin-user"))
cmd = ['pwgen', '-c', '16', '1']
passwd = str(subprocess.check_output(cmd)).strip()
open(STORED_PASSWD, 'w+').writelines("%s\n" % passwd)
# User is managed by ldap backend when using ldap identity
if (not (config('identity-backend') == 'ldap' and
config('ldap-readonly'))):
create_user(config('admin-user'), passwd, tenant='admin')
update_user_password(config('admin-user'), passwd)
create_role(config('admin-role'), config('admin-user'), 'admin')
create_service_entry("keystone", "identity",
"Keystone Identity Service")
for region in config('region').split():
create_keystone_endpoint(public_ip=resolve_address(PUBLIC),
service_port=config("service-port"),
internal_ip=resolve_address(INTERNAL),
admin_ip=resolve_address(ADMIN),
auth_port=config("admin-port"),
region=region)
for region in config('region').split():
create_keystone_endpoint(public_ip=resolve_address(PUBLIC),
service_port=config("service-port"),
internal_ip=resolve_address(INTERNAL),
admin_ip=resolve_address(ADMIN),
auth_port=config("admin-port"),
region=region)
return _ensure_initial_admin(config)
def endpoint_url(ip, port):
@ -579,20 +617,201 @@ def get_service_password(service_username):
return passwd
def synchronize_ca():
'''
Broadcast service credentials to peers or consume those that have been
broadcasted by peer, depending on hook context.
'''
if not eligible_leader(CLUSTER_RES):
return
log('Synchronizing CA to all peers.')
if is_clustered():
if config('https-service-endpoints') in ['True', 'true']:
unison.sync_to_peers(peer_interface='cluster',
paths=[SSL_DIR], user=SSH_USER, verbose=True)
CA = []
def ensure_permissions(path, user=None, group=None, perms=None):
"""Set chown and chmod for path
Note that -1 for uid or gid results in no change.
"""
if user:
uid = pwd.getpwnam(user).pw_uid
else:
uid = -1
if group:
gid = grp.getgrnam(group).gr_gid
else:
gid = -1
os.chown(path, uid, gid)
if perms:
os.chmod(path, perms)
def check_peer_actions():
"""Honour service action requests from sync master.
Check for service action request flags, perform the action then delete the
flag.
"""
restart = relation_get(attribute='restart-services-trigger')
if restart and os.path.isdir(SYNC_FLAGS_DIR):
for flagfile in glob.glob(os.path.join(SYNC_FLAGS_DIR, '*')):
flag = os.path.basename(flagfile)
service = flag.partition('.')[0]
action = flag.partition('.')[2]
if action == 'restart':
log("Running action='%s' on service '%s'" %
(action, service), level=DEBUG)
service_restart(service)
elif action == 'start':
log("Running action='%s' on service '%s'" %
(action, service), level=DEBUG)
service_start(service)
elif action == 'stop':
log("Running action='%s' on service '%s'" %
(action, service), level=DEBUG)
service_stop(service)
elif flag == 'update-ca-certificates':
log("Running update-ca-certificates", level=DEBUG)
subprocess.check_call(['update-ca-certificates'])
else:
log("Unknown action flag=%s" % (flag), level=WARNING)
os.remove(flagfile)
def create_peer_service_actions(action, services):
"""Mark remote services for action.
Default action is restart. These actions will be picked up by peer units
e.g. we may need to restart services on peer units after certs have been
synced.
"""
for service in services:
flagfile = os.path.join(SYNC_FLAGS_DIR, '%s.%s' %
(service.strip(), action))
log("Creating action %s" % (flagfile), level=DEBUG)
write_file(flagfile, content='', owner=SSH_USER, group='keystone',
perms=0o644)
def create_service_action(action):
flagfile = os.path.join(SYNC_FLAGS_DIR, action)
log("Creating action %s" % (flagfile), level=DEBUG)
write_file(flagfile, content='', owner=SSH_USER, group='keystone',
perms=0o644)
def is_ssl_cert_master():
"""Return True if this unit is ssl cert master."""
master = None
for rid in relation_ids('cluster'):
master = relation_get(attribute='ssl-cert-master', rid=rid,
unit=local_unit())
if master and master == unit_get('private-address'):
return True
return False
@retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError)
def unison_sync(paths_to_sync):
"""Do unison sync and retry a few times if it fails since peers may not be
ready for sync.
"""
log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),
level=INFO)
keystone_gid = grp.getgrnam('keystone').gr_gid
unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,
user=SSH_USER, verbose=True, gid=keystone_gid,
fatal=True)
def synchronize_ca(fatal=True):
"""Broadcast service credentials to peers.
By default a failure to sync is fatal and will result in a raised
exception.
This function uses a relation setting 'ssl-cert-master' to get some
leader stickiness while synchronisation is being carried out. This ensures
that the last host to create and broadcast certificates has the option to
complete actions before electing the new leader as sync master.
"""
paths_to_sync = [SYNC_FLAGS_DIR]
if not peer_units():
log("Not syncing certs since there are no peer units.", level=INFO)
return
# If no ssl master elected and we are cluster leader, elect this unit.
if is_elected_leader(CLUSTER_RES):
master = relation_get(attribute='ssl-cert-master')
if not master or master == 'unknown':
log("Electing this unit as ssl-cert-master", level=DEBUG)
for rid in relation_ids('cluster'):
relation_set(relation_id=rid,
relation_settings={'ssl-cert-master':
unit_get('private-address'),
'trigger': str(uuid.uuid4())})
# Return now and wait for echo before continuing.
return
if not is_ssl_cert_master():
log("Not ssl cert master - skipping sync", level=INFO)
return
if str_is_true(config('https-service-endpoints')):
log("Syncing all endpoint certs since https-service-endpoints=True",
level=DEBUG)
paths_to_sync.append(SSL_DIR)
paths_to_sync.append(APACHE_SSL_DIR)
paths_to_sync.append(CA_CERT_PATH)
elif str_is_true(config('use-https')):
log("Syncing keystone-endpoint certs since use-https=True",
level=DEBUG)
paths_to_sync.append(APACHE_SSL_DIR)
paths_to_sync.append(CA_CERT_PATH)
if not paths_to_sync:
log("Nothing to sync - skipping", level=DEBUG)
return
# If we are sync master proceed even if we are not leader since we are
# most likely to have up-to-date certs. If not leader we will re-elect once
# synced. This is done to avoid being affected by leader changing before
# all units have synced certs.
# Clear any existing flags
if os.path.isdir(SYNC_FLAGS_DIR):
shutil.rmtree(SYNC_FLAGS_DIR)
mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775)
# We need to restart peer apache services to ensure they have picked up
# new ssl keys.
create_peer_service_actions('restart', ['apache2'])
create_service_action('update-ca-certificates')
try:
unison_sync(paths_to_sync)
except:
if fatal:
raise
else:
log("Sync failed but fatal=False", level=INFO)
return
trigger = str(uuid.uuid4())
log("Sending restart-services-trigger=%s to all peers" % (trigger),
level=DEBUG)
settings = {'restart-services-trigger': trigger}
# Cleanup
shutil.rmtree(SYNC_FLAGS_DIR)
mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775)
# If we are the sync master but no longer leader then re-elect master.
if not is_elected_leader(CLUSTER_RES):
log("Re-electing ssl cert master.", level=INFO)
settings['ssl-cert-master'] = 'unknown'
log("Sync complete - sending peer info", level=DEBUG)
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, **settings)
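The ssl-cert-master handling above gives the sync master stickiness: the
hacluster leader claims mastership and waits for the peer echo, and the
master keeps syncing (even if leadership moves) until it re-elects by
setting the value back to 'unknown'. A minimal, self-contained simulation
of that decision logic (relation data faked with a dict; names and flow
simplified from the diff):

relation = {}  # stand-in for juju cluster relation data

def sync_decision(address, is_leader):
    master = relation.get('ssl-cert-master')
    if is_leader and master in (None, 'unknown'):
        relation['ssl-cert-master'] = address  # claim, then wait for echo
        return 'claimed'
    if master == address:
        return 'sync'  # sticky master proceeds even if no longer leader
    return 'skip'

assert sync_decision('10.0.0.1', is_leader=True) == 'claimed'
assert sync_decision('10.0.0.1', is_leader=False) == 'sync'
assert sync_decision('10.0.0.2', is_leader=True) == 'skip'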
def get_ca(user='keystone', group='keystone'):
@ -600,22 +819,31 @@ def get_ca(user='keystone', group='keystone'):
Initialize a new CA object if one hasn't already been loaded.
This will create a new CA or load an existing one.
"""
if not CA:
if not ssl.CA_SINGLETON:
if not os.path.isdir(SSL_DIR):
os.mkdir(SSL_DIR)
d_name = '_'.join(SSL_CA_NAME.lower().split(' '))
ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group,
ca_dir=os.path.join(SSL_DIR,
'%s_intermediate_ca' % d_name),
root_ca_dir=os.path.join(SSL_DIR,
'%s_root_ca' % d_name))
# SSL_DIR is synchronized with all peers over unison+ssh, so we need
# to ensure permissions are correct.
subprocess.check_output(['chown', '-R', '%s.%s' % (user, group),
'%s' % SSL_DIR])
subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR])
CA.append(ca)
return CA[0]
# Tag this host as the ssl cert master.
if is_clustered() or peer_units():
peer_store(key='ssl-cert-master',
value=unit_get('private-address'))
ssl.CA_SINGLETON.append(ca)
return ssl.CA_SINGLETON[0]
def relation_list(rid):
@ -657,7 +885,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
relation_data["auth_port"] = config('admin-port')
relation_data["service_port"] = config('service-port')
relation_data["region"] = config('region')
if config('https-service-endpoints') in ['True', 'true']:
if str_is_true(config('https-service-endpoints')):
# Pass CA cert as client will need it to
# verify https connections
ca = get_ca(user=SSH_USER)
@ -796,7 +1024,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
relation_data["auth_protocol"] = "http"
relation_data["service_protocol"] = "http"
# generate or get a new cert/key for service if set to manage certs.
if config('https-service-endpoints') in ['True', 'true']:
if str_is_true(config('https-service-endpoints')):
ca = get_ca(user=SSH_USER)
# NOTE(jamespage) may have multiple cns to deal with to iterate
https_cns = set(https_cns)

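The sync-flag files created above encode '<service>.<action>' in the
filename (e.g. 'apache2.restart'), and check_peer_actions recovers both
parts with str.partition; flags without a dot, such as
'update-ca-certificates', are matched against the whole name. A
self-contained sketch of that round-trip (parse_flag is a hypothetical
name):

import os

SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/'

def parse_flag(flagfile):
    # '<service>.<action>' -> (service, action); no dot -> (flag, '')
    flag = os.path.basename(flagfile)
    service, _, action = flag.partition('.')
    return service, action

assert parse_flag(SYNC_FLAGS_DIR + 'apache2.restart') == \
    ('apache2', 'restart')
assert parse_flag(SYNC_FLAGS_DIR + 'update-ca-certificates') == \
    ('update-ca-certificates', '')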
@ -43,8 +43,7 @@ TO_PATCH = [
# charmhelpers.contrib.openstack.utils
'configure_installation_source',
# charmhelpers.contrib.hahelpers.cluster_utils
'is_leader',
'eligible_leader',
'is_elected_leader',
'get_hacluster_config',
# keystone_utils
'restart_map',
@ -234,6 +233,7 @@ class KeystoneRelationTests(CharmTestCase):
relation_id='identity-service:0',
remote_unit='unit/0')
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@ -242,9 +242,10 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'configure_https')
def test_config_changed_no_openstack_upgrade_leader(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user, cluster_joined):
configs, get_homedir, ensure_user, cluster_joined,
ensure_permissions):
self.openstack_upgrade_available.return_value = False
self.eligible_leader.return_value = True
self.is_elected_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
self.relation_list.return_value = ['unit/0']
@ -262,8 +263,10 @@ class KeystoneRelationTests(CharmTestCase):
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
remote_unit='unit/0',
sync_certs=False)
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@ -272,9 +275,10 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'configure_https')
def test_config_changed_no_openstack_upgrade_not_leader(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user, cluster_joined):
configs, get_homedir, ensure_user, cluster_joined,
ensure_permissions):
self.openstack_upgrade_available.return_value = False
self.eligible_leader.return_value = False
self.is_elected_leader.return_value = False
hooks.config_changed()
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
@ -288,6 +292,7 @@ class KeystoneRelationTests(CharmTestCase):
self.assertFalse(self.ensure_initial_admin.called)
self.assertFalse(identity_changed.called)
@patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@ -296,9 +301,10 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'configure_https')
def test_config_changed_with_openstack_upgrade(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user, cluster_joined):
configs, get_homedir, ensure_user, cluster_joined,
ensure_permissions):
self.openstack_upgrade_available.return_value = True
self.eligible_leader.return_value = True
self.is_elected_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
self.relation_list.return_value = ['unit/0']
@ -318,13 +324,14 @@ class KeystoneRelationTests(CharmTestCase):
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
remote_unit='unit/0',
sync_certs=False)
@patch.object(hooks, 'hashlib')
@patch.object(hooks, 'send_notifications')
def test_identity_changed_leader(self, mock_send_notifications,
mock_hashlib):
self.eligible_leader.return_value = True
self.is_elected_leader.return_value = True
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
@ -334,7 +341,7 @@ class KeystoneRelationTests(CharmTestCase):
self.assertTrue(self.synchronize_ca.called)
def test_identity_changed_no_leader(self):
self.eligible_leader.return_value = False
self.is_elected_leader.return_value = False
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
@ -349,12 +356,14 @@ class KeystoneRelationTests(CharmTestCase):
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
@patch.object(hooks, 'check_peer_actions')
@patch.object(unison, 'ssh_authorized_peers')
@patch.object(hooks, 'CONFIGS')
def test_cluster_changed(self, configs, ssh_authorized_peers):
def test_cluster_changed(self, configs, ssh_authorized_peers,
check_peer_actions):
hooks.cluster_changed()
self.peer_echo.assert_called_with(includes=['_passwd',
'identity-service:'])
whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master']
self.peer_echo.assert_called_with(includes=whitelist)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
@ -411,7 +420,7 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_not_clustered_not_leader(self, configs):
self.relation_get.return_value = False
self.is_leader.return_value = False
self.is_elected_leader.return_value = False
hooks.ha_changed()
self.assertTrue(configs.write_all.called)
@ -421,7 +430,7 @@ class KeystoneRelationTests(CharmTestCase):
def test_ha_relation_changed_clustered_leader(
self, configs, identity_changed):
self.relation_get.return_value = True
self.is_leader.return_value = True
self.is_elected_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
@ -432,7 +441,8 @@ class KeystoneRelationTests(CharmTestCase):
'keystone endpoint configuration')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
remote_unit='unit/0',
sync_certs=False)
@patch.object(hooks, 'CONFIGS')
def test_configure_https_enable(self, configs):
@ -458,7 +468,7 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_leader(self, ssh_authorized_peers):
self.eligible_leader.return_value = True
self.is_elected_leader.return_value = True
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
@ -473,7 +483,7 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_not_leader(self, ssh_authorized_peers):
self.eligible_leader.return_value = False
self.is_elected_leader.return_value = False
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)

@ -26,7 +26,7 @@ TO_PATCH = [
'get_os_codename_install_source',
'grant_role',
'configure_installation_source',
'eligible_leader',
'is_elected_leader',
'https',
'is_clustered',
'peer_store_and_set',
@ -113,7 +113,7 @@ class TestKeystoneUtils(CharmTestCase):
self, migrate_database, determine_packages, configs):
self.test_config.set('openstack-origin', 'precise')
determine_packages.return_value = []
self.eligible_leader.return_value = True
self.is_elected_leader.return_value = True
utils.do_openstack_upgrade(configs)