[hopem,r=gnuoy]
Fixes ssl cert synchronisation across peers.

Closes-Bug: 1317782
commit 50382ceb7e

README.md | 76
@@ -1,34 +1,70 @@
This charm provides Keystone, the Openstack identity service. It's target
platform is Ubuntu Precise + Openstack Essex. This has not been tested
using Oneiric + Diablo.
Overview
========

It provides three interfaces.

- identity-service: Openstack API endpoints request an entry in the
Keystone service catalog + endpoint template catalog. When a relation
This charm provides Keystone, the Openstack identity service. Its target
platform is (ideally) Ubuntu LTS + Openstack.

Usage
=====

The following interfaces are provided:

- nrpe-external-master: Used to generate Nagios checks.

- identity-service: Openstack API endpoints request an entry in the
Keystone service catalog + endpoint template catalog. When a relation
is established, Keystone receives: service name, region, public_url,
admin_url and internal_url. It first checks that the requested service
is listed as a supported service. This list should stay updated to
support current Openstack core services. If the services is supported,
a entry in the service catalog is created, an endpoint template is
created and a admin token is generated. The other end of the relation
recieves the token as well as info on which ports Keystone is listening.
admin_url and internal_url. It first checks that the requested service
is listed as a supported service. This list should stay updated to
support current Openstack core services. If the service is supported,
an entry in the service catalog is created, an endpoint template is
created and an admin token is generated. The other end of the relation
receives the token as well as info on which ports Keystone is listening
on.

- keystone-service: This is currently only used by Horizon/dashboard
- keystone-service: This is currently only used by Horizon/dashboard
as its interaction with Keystone is different from other Openstack API
servicies. That is, Horizon requests a Keystone role and token exists.
services. That is, Horizon requests that a Keystone role and token exist.
During a relation, Horizon requests its configured default role and
Keystone responds with a token and the auth + admin ports on which
Keystone is listening.

- identity-admin: Charms use this relation to obtain the credentials
for the admin user. This is intended for charms that automatically
- identity-admin: Charms use this relation to obtain the credentials
for the admin user. This is intended for charms that automatically
provision users, tenants, etc. or that otherwise automate using the
Openstack cluster deployment.

Keystone requires a database. By default, a local sqlite database is used.
The charm supports relations to a shared-db via mysql-shared interface. When
- identity-notifications: Used to broadcast messages to any services
listening on the interface.

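As an illustration of the identity-service interface described above, here is a minimal sketch (not part of this charm, and assuming charmhelpers is available) of what a consuming API charm's identity-service relation-joined hook might send. The key names follow the description; the service name, region and URLs are purely illustrative:

    from charmhelpers.core.hookenv import relation_set

    def identity_service_joined(relation_id=None):
        # Advertise this service so keystone can create the catalog entry and
        # endpoint template, then hand back an admin token and its ports.
        relation_set(relation_id=relation_id,
                     service='glance',
                     region='RegionOne',
                     public_url='http://10.0.0.10:9292',
                     internal_url='http://10.0.0.10:9292',
                     admin_url='http://10.0.0.10:9292')
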
Database
--------

Keystone requires a database. By default, a local sqlite database is used.
The charm supports relations to a shared-db via mysql-shared interface. When
a new data store is configured, the charm ensures the minimum administrator
credentials exist (as configured via charm configuration).

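The "minimum administrator credentials" bootstrap mentioned above boils down to the following condensed sketch of ensure_initial_admin() from keystone_utils.py later in this diff; the helpers are the charm's own, so the snippet only makes sense when run inside the charm:

    from keystone_utils import (create_role, create_service_entry,
                                create_tenant, create_user, get_admin_passwd,
                                update_user_password)

    def bootstrap_admin(config):
        # Idempotent: safe to run on install and again on db-changed.
        create_tenant("admin")
        create_tenant(config("service-tenant"))
        passwd = get_admin_passwd()
        if passwd:
            create_user(config('admin-user'), passwd, tenant='admin')
            update_user_password(config('admin-user'), passwd)
            create_role(config('admin-role'), config('admin-user'), 'admin')
        create_service_entry("keystone", "identity",
                             "Keystone Identity Service")
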
VIP is only required if you plan on multi-unit clusterming. The VIP becomes a highly-available API endpoint.
HA/Clustering
-------------

VIP is only required if you plan on multi-unit clustering (requires relating
with hacluster charm). The VIP becomes a highly-available API endpoint.

SSL/HTTPS
---------

This charm also supports SSL and HTTPS endpoints. In order to ensure SSL
certificates are only created once and distributed to all units, one unit gets
elected as an ssl-cert-master. One side-effect of this is that as units are
scaled-out the currently elected leader needs to be running in order for nodes
to sync certificates. This 'feature' is to work around the lack of native
leadership election via Juju itself, a feature that is due for release some
time soon but until then we have to rely on this. Also, if a keystone unit does
go down, it must be removed from Juju, i.e.

    juju destroy-unit keystone/<unit-num>

Otherwise it will be assumed that this unit may come back at some point and
therefore must be known to be in-sync with the rest before continuing.

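The ssl-cert-master election described above is tracked as an 'ssl-cert-master' key on the cluster peer relation. The check each unit performs reduces to the following, lifted from is_ssl_cert_master() added to keystone_utils.py later in this diff (the imports are the standard charmhelpers hook tools):

    from charmhelpers.core.hookenv import local_unit, relation_get, relation_ids

    def is_ssl_cert_master():
        # A unit is the cert master only if its own cluster relation setting
        # names itself.
        master = None
        for rid in relation_ids('cluster'):
            master = relation_get(attribute='ssl-cert-master', rid=rid,
                                  unit=local_unit())
        return master == local_unit()
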
@@ -1,3 +1,5 @@
import os

from charmhelpers.core.hookenv import config

from charmhelpers.core.host import mkdir, write_file
@@ -6,13 +8,16 @@ from charmhelpers.contrib.openstack import context

from charmhelpers.contrib.hahelpers.cluster import (
    determine_apache_port,
    determine_api_port
    determine_api_port,
)

from charmhelpers.core.hookenv import (
    log,
    INFO,
)

from charmhelpers.contrib.hahelpers.apache import install_ca_cert

import os

CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'


@@ -29,20 +34,52 @@ class ApacheSSLContext(context.ApacheSSLContext):
        return super(ApacheSSLContext, self).__call__()

    def configure_cert(self, cn):
        from keystone_utils import SSH_USER, get_ca
        from keystone_utils import (
            SSH_USER,
            get_ca,
            ensure_permissions,
            is_ssl_cert_master,
        )

        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        perms = 0o755
        mkdir(path=ssl_dir, owner=SSH_USER, group='keystone', perms=perms)
        # Ensure accessible by keystone ssh user and group (for sync)
        ensure_permissions(ssl_dir, user=SSH_USER, group='keystone',
                           perms=perms)

        if not is_ssl_cert_master():
            log("Not ssl-cert-master - skipping apache cert config",
                level=INFO)
            return

        log("Creating apache ssl certs in %s" % (ssl_dir), level=INFO)

        ca = get_ca(user=SSH_USER)
        cert, key = ca.get_cert_and_key(common_name=cn)
        write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)),
                   content=cert)
                   content=cert, owner=SSH_USER, group='keystone', perms=0o644)
        write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)),
                   content=key)
                   content=key, owner=SSH_USER, group='keystone', perms=0o644)

    def configure_ca(self):
        from keystone_utils import SSH_USER, get_ca
        from keystone_utils import (
            SSH_USER,
            get_ca,
            ensure_permissions,
            is_ssl_cert_master,
        )

        if not is_ssl_cert_master():
            log("Not ssl-cert-master - skipping apache cert config",
                level=INFO)
            return

        ca = get_ca(user=SSH_USER)
        install_ca_cert(ca.get_ca_bundle())
        # Ensure accessible by keystone ssh user and group (unison)
        ensure_permissions(CA_CERT_PATH, user=SSH_USER, group='keystone',
                           perms=0o0644)

    def canonical_names(self):
        addresses = self.get_network_addresses()
@ -1,7 +1,9 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import stat
|
||||
import sys
|
||||
import time
|
||||
|
||||
@ -16,6 +18,8 @@ from charmhelpers.core.hookenv import (
|
||||
is_relation_made,
|
||||
log,
|
||||
local_unit,
|
||||
DEBUG,
|
||||
WARNING,
|
||||
ERROR,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
@ -48,9 +52,8 @@ from keystone_utils import (
|
||||
get_admin_passwd,
|
||||
migrate_database,
|
||||
save_script_rc,
|
||||
synchronize_ca,
|
||||
synchronize_ca_if_changed,
|
||||
register_configs,
|
||||
relation_list,
|
||||
restart_map,
|
||||
services,
|
||||
CLUSTER_RES,
|
||||
@ -58,12 +61,18 @@ from keystone_utils import (
|
||||
SSH_USER,
|
||||
setup_ipv6,
|
||||
send_notifications,
|
||||
check_peer_actions,
|
||||
CA_CERT_PATH,
|
||||
ensure_permissions,
|
||||
get_ssl_sync_request_units,
|
||||
is_str_true,
|
||||
is_ssl_cert_master,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.hahelpers.cluster import (
|
||||
eligible_leader,
|
||||
is_leader,
|
||||
is_elected_leader,
|
||||
get_hacluster_config,
|
||||
peer_units,
|
||||
)
|
||||
|
||||
from charmhelpers.payload.execd import execd_preinstall
|
||||
@ -73,6 +82,7 @@ from charmhelpers.contrib.peerstorage import (
|
||||
)
|
||||
from charmhelpers.contrib.openstack.ip import (
|
||||
ADMIN,
|
||||
PUBLIC,
|
||||
resolve_address,
|
||||
)
|
||||
from charmhelpers.contrib.network.ip import (
|
||||
@ -100,12 +110,14 @@ def install():
|
||||
|
||||
@hooks.hook('config-changed')
|
||||
@restart_on_change(restart_map())
|
||||
@synchronize_ca_if_changed()
|
||||
def config_changed():
|
||||
if config('prefer-ipv6'):
|
||||
setup_ipv6()
|
||||
sync_db_with_multi_ipv6_addresses(config('database'),
|
||||
config('database-user'))
|
||||
|
||||
unison.ensure_user(user=SSH_USER, group='juju_keystone')
|
||||
unison.ensure_user(user=SSH_USER, group='keystone')
|
||||
homedir = unison.get_homedir(SSH_USER)
|
||||
if not os.path.isdir(homedir):
|
||||
@ -116,25 +128,33 @@ def config_changed():
|
||||
|
||||
check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
|
||||
|
||||
# Ensure unison can write to certs dir.
|
||||
# FIXME: need a better way around this e.g. move cert to its own dir
|
||||
# and give that unison permissions.
|
||||
path = os.path.dirname(CA_CERT_PATH)
|
||||
perms = int(oct(stat.S_IMODE(os.stat(path).st_mode) |
|
||||
(stat.S_IWGRP | stat.S_IXGRP)), base=8)
|
||||
ensure_permissions(path, group='keystone', perms=perms)
|
||||
|
||||
save_script_rc()
|
||||
configure_https()
|
||||
update_nrpe_config()
|
||||
CONFIGS.write_all()
|
||||
if eligible_leader(CLUSTER_RES):
|
||||
migrate_database()
|
||||
ensure_initial_admin(config)
|
||||
log('Firing identity_changed hook for all related services.')
|
||||
# HTTPS may have been set - so fire all identity relations
|
||||
# again
|
||||
for r_id in relation_ids('identity-service'):
|
||||
for unit in relation_list(r_id):
|
||||
identity_changed(relation_id=r_id,
|
||||
remote_unit=unit)
|
||||
|
||||
# Update relations since SSL may have been configured. If we have peer
|
||||
# units we can rely on the sync to do this in cluster relation.
|
||||
if is_elected_leader(CLUSTER_RES) and not peer_units():
|
||||
update_all_identity_relation_units()
|
||||
|
||||
for rid in relation_ids('identity-admin'):
|
||||
admin_relation_changed(rid)
|
||||
for rid in relation_ids('cluster'):
|
||||
cluster_joined(rid)
|
||||
|
||||
# Ensure sync request is sent out (needed for upgrade to ssl from non-ssl)
|
||||
settings = {}
|
||||
append_ssl_sync_request(settings)
|
||||
if settings:
|
||||
for rid in relation_ids('cluster'):
|
||||
relation_set(relation_id=rid, relation_settings=settings)
|
||||
|
||||
|
||||
@hooks.hook('shared-db-relation-joined')
|
||||
@ -167,14 +187,35 @@ def pgsql_db_joined():
|
||||
relation_set(database=config('database'))
|
||||
|
||||
|
||||
def update_all_identity_relation_units():
|
||||
CONFIGS.write_all()
|
||||
try:
|
||||
migrate_database()
|
||||
except Exception as exc:
|
||||
log("Database initialisation failed (%s) - db not ready?" % (exc),
|
||||
level=WARNING)
|
||||
else:
|
||||
ensure_initial_admin(config)
|
||||
log('Firing identity_changed hook for all related services.')
|
||||
for rid in relation_ids('identity-service'):
|
||||
for unit in related_units(rid):
|
||||
identity_changed(relation_id=rid, remote_unit=unit)
|
||||
|
||||
|
||||
@synchronize_ca_if_changed(force=True)
|
||||
def update_all_identity_relation_units_force_sync():
|
||||
update_all_identity_relation_units()
|
||||
|
||||
|
||||
@hooks.hook('shared-db-relation-changed')
|
||||
@restart_on_change(restart_map())
|
||||
@synchronize_ca_if_changed()
|
||||
def db_changed():
|
||||
if 'shared-db' not in CONFIGS.complete_contexts():
|
||||
log('shared-db relation incomplete. Peer not ready?')
|
||||
else:
|
||||
CONFIGS.write(KEYSTONE_CONF)
|
||||
if eligible_leader(CLUSTER_RES):
|
||||
if is_elected_leader(CLUSTER_RES):
|
||||
# Bugs 1353135 & 1187508. Dbs can appear to be ready before the
|
||||
# units acl entry has been added. So, if the db supports passing
|
||||
# a list of permitted units then check if we're in the list.
|
||||
@ -182,38 +223,46 @@ def db_changed():
|
||||
if allowed_units and local_unit() not in allowed_units.split():
|
||||
log('Allowed_units list provided and this unit not present')
|
||||
return
|
||||
migrate_database()
|
||||
ensure_initial_admin(config)
|
||||
# Ensure any existing service entries are updated in the
|
||||
# new database backend
|
||||
for rid in relation_ids('identity-service'):
|
||||
for unit in related_units(rid):
|
||||
identity_changed(relation_id=rid, remote_unit=unit)
|
||||
update_all_identity_relation_units()
|
||||
|
||||
|
||||
@hooks.hook('pgsql-db-relation-changed')
|
||||
@restart_on_change(restart_map())
|
||||
@synchronize_ca_if_changed()
|
||||
def pgsql_db_changed():
|
||||
if 'pgsql-db' not in CONFIGS.complete_contexts():
|
||||
log('pgsql-db relation incomplete. Peer not ready?')
|
||||
else:
|
||||
CONFIGS.write(KEYSTONE_CONF)
|
||||
if eligible_leader(CLUSTER_RES):
|
||||
migrate_database()
|
||||
ensure_initial_admin(config)
|
||||
if is_elected_leader(CLUSTER_RES):
|
||||
# Ensure any existing service entries are updated in the
|
||||
# new database backend
|
||||
for rid in relation_ids('identity-service'):
|
||||
for unit in related_units(rid):
|
||||
identity_changed(relation_id=rid, remote_unit=unit)
|
||||
update_all_identity_relation_units()
|
||||
|
||||
|
||||
@hooks.hook('identity-service-relation-changed')
|
||||
@synchronize_ca_if_changed()
|
||||
def identity_changed(relation_id=None, remote_unit=None):
|
||||
CONFIGS.write_all()
|
||||
|
||||
notifications = {}
|
||||
if eligible_leader(CLUSTER_RES):
|
||||
add_service_to_keystone(relation_id, remote_unit)
|
||||
synchronize_ca()
|
||||
if is_elected_leader(CLUSTER_RES):
|
||||
# Catch database not configured error and defer until db ready
|
||||
from keystoneclient.apiclient.exceptions import InternalServerError
|
||||
try:
|
||||
add_service_to_keystone(relation_id, remote_unit)
|
||||
except InternalServerError as exc:
|
||||
key = re.compile("'keystone\..+' doesn't exist")
|
||||
if re.search(key, exc.message):
|
||||
log("Keystone database not yet ready (InternalServerError "
|
||||
"raised) - deferring until *-db relation completes.",
|
||||
level=WARNING)
|
||||
return
|
||||
|
||||
log("Unexpected exception occurred", level=ERROR)
|
||||
raise
|
||||
|
||||
settings = relation_get(rid=relation_id, unit=remote_unit)
|
||||
service = settings.get('service', None)
|
||||
@ -241,46 +290,113 @@ def identity_changed(relation_id=None, remote_unit=None):
|
||||
send_notifications(notifications)
|
||||
|
||||
|
||||
def append_ssl_sync_request(settings):
|
||||
"""Add request to be synced to relation settings.
|
||||
|
||||
This will be consumed by the ssl-cert-master in cluster-relation-changed.
|
||||
"""
|
||||
if (is_str_true(config('use-https')) or
|
||||
is_str_true(config('https-service-endpoints'))):
|
||||
unit = local_unit().replace('/', '-')
|
||||
settings['ssl-sync-required-%s' % (unit)] = '1'
|
||||
|
||||
|
||||
@hooks.hook('cluster-relation-joined')
|
||||
def cluster_joined(relation_id=None):
|
||||
def cluster_joined():
|
||||
unison.ssh_authorized_peers(user=SSH_USER,
|
||||
group='juju_keystone',
|
||||
peer_interface='cluster',
|
||||
ensure_local_user=True)
|
||||
|
||||
settings = {}
|
||||
|
||||
for addr_type in ADDRESS_TYPES:
|
||||
address = get_address_in_network(
|
||||
config('os-{}-network'.format(addr_type))
|
||||
)
|
||||
if address:
|
||||
relation_set(
|
||||
relation_id=relation_id,
|
||||
relation_settings={'{}-address'.format(addr_type): address}
|
||||
)
|
||||
settings['{}-address'.format(addr_type)] = address
|
||||
|
||||
if config('prefer-ipv6'):
|
||||
private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
|
||||
relation_set(relation_id=relation_id,
|
||||
relation_settings={'private-address': private_addr})
|
||||
settings['private-address'] = private_addr
|
||||
|
||||
append_ssl_sync_request(settings)
|
||||
|
||||
relation_set(relation_settings=settings)
|
||||
|
||||
|
||||
def apply_echo_filters(settings, echo_whitelist):
|
||||
"""Filter settings to be peer_echo'ed.
|
||||
|
||||
We may have received some data that we don't want to re-echo so filter
|
||||
out unwanted keys and provide overrides.
|
||||
|
||||
Returns:
|
||||
tuple(filtered list of keys to be echoed, overrides for keys omitted)
|
||||
"""
|
||||
filtered = []
|
||||
overrides = {}
|
||||
for key in settings.iterkeys():
|
||||
for ekey in echo_whitelist:
|
||||
if ekey in key:
|
||||
if ekey == 'identity-service:':
|
||||
auth_host = resolve_address(ADMIN)
|
||||
service_host = resolve_address(PUBLIC)
|
||||
if (key.endswith('auth_host') and
|
||||
settings[key] != auth_host):
|
||||
overrides[key] = auth_host
|
||||
continue
|
||||
elif (key.endswith('service_host') and
|
||||
settings[key] != service_host):
|
||||
overrides[key] = service_host
|
||||
continue
|
||||
|
||||
filtered.append(key)
|
||||
|
||||
return filtered, overrides
|
||||
|
||||
|
||||
@hooks.hook('cluster-relation-changed',
|
||||
'cluster-relation-departed')
|
||||
@restart_on_change(restart_map(), stopstart=True)
|
||||
def cluster_changed():
|
||||
settings = relation_get()
|
||||
# NOTE(jamespage) re-echo passwords for peer storage
|
||||
peer_echo(includes=['_passwd', 'identity-service:'])
|
||||
echo_whitelist, overrides = \
|
||||
apply_echo_filters(settings, ['_passwd', 'identity-service:',
|
||||
'ssl-cert-master'])
|
||||
log("Peer echo overrides: %s" % (overrides), level=DEBUG)
|
||||
relation_set(**overrides)
|
||||
if echo_whitelist:
|
||||
log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
|
||||
peer_echo(includes=echo_whitelist)
|
||||
|
||||
check_peer_actions()
|
||||
unison.ssh_authorized_peers(user=SSH_USER,
|
||||
group='keystone',
|
||||
peer_interface='cluster',
|
||||
ensure_local_user=True)
|
||||
synchronize_ca()
|
||||
CONFIGS.write_all()
|
||||
for r_id in relation_ids('identity-service'):
|
||||
for unit in relation_list(r_id):
|
||||
identity_changed(relation_id=r_id,
|
||||
remote_unit=unit)
|
||||
for rid in relation_ids('identity-admin'):
|
||||
admin_relation_changed(rid)
|
||||
|
||||
if is_elected_leader(CLUSTER_RES) or is_ssl_cert_master():
|
||||
units = get_ssl_sync_request_units()
|
||||
synced_units = relation_get(attribute='ssl-synced-units',
|
||||
unit=local_unit())
|
||||
if synced_units:
|
||||
synced_units = json.loads(synced_units)
|
||||
diff = set(units).symmetric_difference(set(synced_units))
|
||||
|
||||
if units and (not synced_units or diff):
|
||||
log("New peers joined and need syncing - %s" %
|
||||
(', '.join(units)), level=DEBUG)
|
||||
update_all_identity_relation_units_force_sync()
|
||||
else:
|
||||
update_all_identity_relation_units()
|
||||
|
||||
for rid in relation_ids('identity-admin'):
|
||||
admin_relation_changed(rid)
|
||||
else:
|
||||
CONFIGS.write_all()
|
||||
|
||||
|
||||
@hooks.hook('ha-relation-joined')
|
||||
@ -320,7 +436,7 @@ def ha_joined():
|
||||
vip_group.append(vip_key)
|
||||
|
||||
if len(vip_group) >= 1:
|
||||
relation_set(groups={'grp_ks_vips': ' '.join(vip_group)})
|
||||
relation_set(groups={CLUSTER_RES: ' '.join(vip_group)})
|
||||
|
||||
init_services = {
|
||||
'res_ks_haproxy': 'haproxy'
|
||||
@ -338,17 +454,17 @@ def ha_joined():
|
||||
|
||||
@hooks.hook('ha-relation-changed')
|
||||
@restart_on_change(restart_map())
|
||||
@synchronize_ca_if_changed()
|
||||
def ha_changed():
|
||||
clustered = relation_get('clustered')
|
||||
CONFIGS.write_all()
|
||||
if (clustered is not None and
|
||||
is_leader(CLUSTER_RES)):
|
||||
|
||||
clustered = relation_get('clustered')
|
||||
if clustered and is_elected_leader(CLUSTER_RES):
|
||||
ensure_initial_admin(config)
|
||||
log('Cluster configured, notifying other services and updating '
|
||||
'keystone endpoint configuration')
|
||||
for rid in relation_ids('identity-service'):
|
||||
for unit in related_units(rid):
|
||||
identity_changed(relation_id=rid, remote_unit=unit)
|
||||
|
||||
update_all_identity_relation_units()
|
||||
|
||||
|
||||
@hooks.hook('identity-admin-relation-changed')
|
||||
@ -365,6 +481,7 @@ def admin_relation_changed(relation_id=None):
|
||||
relation_set(relation_id=relation_id, **relation_data)
|
||||
|
||||
|
||||
@synchronize_ca_if_changed(fatal=True)
|
||||
def configure_https():
|
||||
'''
|
||||
Enables SSL API Apache config if appropriate and kicks identity-service
|
||||
@ -383,25 +500,22 @@ def configure_https():
|
||||
|
||||
@hooks.hook('upgrade-charm')
|
||||
@restart_on_change(restart_map(), stopstart=True)
|
||||
@synchronize_ca_if_changed()
|
||||
def upgrade_charm():
|
||||
apt_install(filter_installed_packages(determine_packages()))
|
||||
unison.ssh_authorized_peers(user=SSH_USER,
|
||||
group='keystone',
|
||||
peer_interface='cluster',
|
||||
ensure_local_user=True)
|
||||
update_nrpe_config()
|
||||
synchronize_ca()
|
||||
if eligible_leader(CLUSTER_RES):
|
||||
log('Cluster leader - ensuring endpoint configuration'
|
||||
' is up to date')
|
||||
time.sleep(10)
|
||||
ensure_initial_admin(config)
|
||||
# Deal with interface changes for icehouse
|
||||
for r_id in relation_ids('identity-service'):
|
||||
for unit in relation_list(r_id):
|
||||
identity_changed(relation_id=r_id,
|
||||
remote_unit=unit)
|
||||
|
||||
CONFIGS.write_all()
|
||||
update_nrpe_config()
|
||||
|
||||
if is_elected_leader(CLUSTER_RES):
|
||||
log('Cluster leader - ensuring endpoint configuration is up to '
|
||||
'date', level=DEBUG)
|
||||
time.sleep(10)
|
||||
update_all_identity_relation_units()
|
||||
|
||||
|
||||
@hooks.hook('nrpe-external-master-relation-joined',
|
||||
|
@ -5,6 +5,13 @@ import shutil
|
||||
import subprocess
|
||||
import tarfile
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
DEBUG,
|
||||
WARNING,
|
||||
)
|
||||
|
||||
CA_EXPIRY = '365'
|
||||
ORG_NAME = 'Ubuntu'
|
||||
@ -101,6 +108,9 @@ keyUsage = digitalSignature, keyEncipherment, keyAgreement
|
||||
extendedKeyUsage = serverAuth, clientAuth
|
||||
"""
|
||||
|
||||
# Instance can be appended to this list to represent a singleton
|
||||
CA_SINGLETON = []
|
||||
|
||||
|
||||
def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
|
||||
print 'Ensuring certificate authority exists at %s.' % ca_dir
|
||||
@ -275,23 +285,42 @@ class JujuCA(object):
|
||||
crt = self._sign_csr(csr, service, common_name)
|
||||
cmd = ['chown', '-R', '%s.%s' % (self.user, self.group), self.ca_dir]
|
||||
subprocess.check_call(cmd)
|
||||
print 'Signed new CSR, crt @ %s' % crt
|
||||
log('Signed new CSR, crt @ %s' % crt, level=DEBUG)
|
||||
return crt, key
|
||||
|
||||
def get_cert_and_key(self, common_name):
|
||||
print 'Getting certificate and key for %s.' % common_name
|
||||
key = os.path.join(self.ca_dir, 'certs', '%s.key' % common_name)
|
||||
crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name)
|
||||
if os.path.isfile(crt):
|
||||
print 'Found existing certificate for %s.' % common_name
|
||||
crt = open(crt, 'r').read()
|
||||
try:
|
||||
key = open(key, 'r').read()
|
||||
except:
|
||||
print 'Could not load ssl private key for %s from %s' %\
|
||||
(common_name, key)
|
||||
exit(1)
|
||||
return crt, key
|
||||
log('Getting certificate and key for %s.' % common_name, level=DEBUG)
|
||||
keypath = os.path.join(self.ca_dir, 'certs', '%s.key' % common_name)
|
||||
crtpath = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name)
|
||||
if os.path.isfile(crtpath):
|
||||
log('Found existing certificate for %s.' % common_name,
|
||||
level=DEBUG)
|
||||
max_retries = 3
|
||||
while True:
|
||||
mtime = os.path.getmtime(crtpath)
|
||||
|
||||
crt = open(crtpath, 'r').read()
|
||||
try:
|
||||
key = open(keypath, 'r').read()
|
||||
except:
|
||||
msg = ('Could not load ssl private key for %s from %s' %
|
||||
(common_name, keypath))
|
||||
raise Exception(msg)
|
||||
|
||||
# Ensure we are not reading a file that is being written to
|
||||
if mtime != os.path.getmtime(crtpath):
|
||||
max_retries -= 1
|
||||
if max_retries == 0:
|
||||
msg = ("crt contents changed during read - retry "
|
||||
"failed")
|
||||
raise Exception(msg)
|
||||
|
||||
log("crt contents changed during read - re-reading",
|
||||
level=WARNING)
|
||||
time.sleep(1)
|
||||
else:
|
||||
return crt, key
|
||||
|
||||
crt, key = self._create_certificate(common_name, common_name)
|
||||
return open(crt, 'r').read(), open(key, 'r').read()
|
||||
|
||||
|
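The retry loop added to get_cert_and_key() above guards against handing back a certificate that unison is part-way through replacing. As a standalone sketch (a generalisation of that pattern, not code from this charm), the idea is:

    import os
    import time

    def read_stable(path, max_retries=3):
        # Re-check mtime after reading so a file that changed mid-read is
        # never returned.
        for _ in range(max_retries):
            mtime = os.path.getmtime(path)
            with open(path) as f:
                data = f.read()
            if os.path.getmtime(path) == mtime:
                return data
            time.sleep(1)
        raise Exception("%s kept changing during read" % path)
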
@ -1,20 +1,27 @@
|
||||
#!/usr/bin/python
|
||||
import subprocess
|
||||
import glob
|
||||
import grp
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import uuid
|
||||
import urlparse
|
||||
import pwd
|
||||
import re
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
import urlparse
|
||||
import uuid
|
||||
|
||||
from base64 import b64encode
|
||||
from collections import OrderedDict
|
||||
from copy import deepcopy
|
||||
|
||||
from charmhelpers.contrib.hahelpers.cluster import(
|
||||
eligible_leader,
|
||||
is_elected_leader,
|
||||
determine_api_port,
|
||||
https,
|
||||
is_clustered,
|
||||
is_elected_leader,
|
||||
peer_units,
|
||||
oldest_peer,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack import context, templating
|
||||
@ -37,8 +44,17 @@ from charmhelpers.contrib.openstack.utils import (
|
||||
os_release,
|
||||
save_script_rc as _save_script_rc)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
mkdir,
|
||||
write_file,
|
||||
)
|
||||
|
||||
import charmhelpers.contrib.unison as unison
|
||||
|
||||
from charmhelpers.core.decorators import (
|
||||
retry_on_exception,
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
is_relation_made,
|
||||
@ -47,8 +63,11 @@ from charmhelpers.core.hookenv import (
|
||||
relation_get,
|
||||
relation_set,
|
||||
relation_ids,
|
||||
related_units,
|
||||
DEBUG,
|
||||
INFO,
|
||||
WARNING,
|
||||
ERROR,
|
||||
)
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
@ -61,6 +80,7 @@ from charmhelpers.fetch import (
|
||||
from charmhelpers.core.host import (
|
||||
service_stop,
|
||||
service_start,
|
||||
service_restart,
|
||||
pwgen,
|
||||
lsb_release
|
||||
)
|
||||
@ -110,10 +130,13 @@ HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
|
||||
APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
|
||||
APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'
|
||||
|
||||
APACHE_SSL_DIR = '/etc/apache2/ssl/keystone'
|
||||
SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/'
|
||||
SSL_DIR = '/var/lib/keystone/juju_ssl/'
|
||||
SSL_CA_NAME = 'Ubuntu Cloud'
|
||||
CLUSTER_RES = 'grp_ks_vips'
|
||||
SSH_USER = 'juju_keystone'
|
||||
SSL_SYNC_SEMAPHORE = threading.Semaphore()
|
||||
|
||||
BASE_RESOURCE_MAP = OrderedDict([
|
||||
(KEYSTONE_CONF, {
|
||||
@ -203,6 +226,13 @@ valid_services = {
|
||||
}
|
||||
|
||||
|
||||
def is_str_true(value):
|
||||
if value and value.lower() in ['true', 'yes']:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def resource_map():
|
||||
'''
|
||||
Dynamically generate a map of resources that will be managed for a single
|
||||
@ -287,7 +317,7 @@ def do_openstack_upgrade(configs):
|
||||
configs.set_release(openstack_release=new_os_rel)
|
||||
configs.write_all()
|
||||
|
||||
if eligible_leader(CLUSTER_RES):
|
||||
if is_elected_leader(CLUSTER_RES):
|
||||
migrate_database()
|
||||
|
||||
|
||||
@ -389,7 +419,7 @@ def create_endpoint_template(region, service, publicurl, adminurl,
|
||||
|
||||
up_to_date = True
|
||||
for k in ['publicurl', 'adminurl', 'internalurl']:
|
||||
if ep[k] != locals()[k]:
|
||||
if ep.get(k) != locals()[k]:
|
||||
up_to_date = False
|
||||
|
||||
if up_to_date:
|
||||
@ -500,7 +530,7 @@ def get_admin_passwd():
|
||||
if passwd and passwd.lower() != "none":
|
||||
return passwd
|
||||
|
||||
if eligible_leader(CLUSTER_RES):
|
||||
if is_elected_leader(CLUSTER_RES):
|
||||
if os.path.isfile(STORED_PASSWD):
|
||||
log("Loading stored passwd from %s" % STORED_PASSWD, level=INFO)
|
||||
with open(STORED_PASSWD, 'r') as fd:
|
||||
@ -527,33 +557,47 @@ def get_admin_passwd():
|
||||
|
||||
|
||||
def ensure_initial_admin(config):
|
||||
""" Ensures the minimum admin stuff exists in whatever database we're
|
||||
# Allow retry on fail since leader may not be ready yet.
|
||||
# NOTE(hopem): ks client may not be installed at module import time so we
|
||||
# use this wrapped approach instead.
|
||||
from keystoneclient.apiclient.exceptions import InternalServerError
|
||||
|
||||
@retry_on_exception(3, base_delay=3, exc_type=InternalServerError)
|
||||
def _ensure_initial_admin(config):
|
||||
"""Ensures the minimum admin stuff exists in whatever database we're
|
||||
using.
|
||||
|
||||
This and the helper functions it calls are meant to be idempotent and
|
||||
run during install as well as during db-changed. This will maintain
|
||||
the admin tenant, user, role, service entry and endpoint across every
|
||||
datastore we might use.
|
||||
|
||||
TODO: Possibly migrate data from one backend to another after it
|
||||
changes?
|
||||
"""
|
||||
create_tenant("admin")
|
||||
create_tenant(config("service-tenant"))
|
||||
# User is managed by ldap backend when using ldap identity
|
||||
if not (config('identity-backend') == 'ldap' and config('ldap-readonly')):
|
||||
passwd = get_admin_passwd()
|
||||
if passwd:
|
||||
create_user(config('admin-user'), passwd, tenant='admin')
|
||||
update_user_password(config('admin-user'), passwd)
|
||||
create_role(config('admin-role'), config('admin-user'), 'admin')
|
||||
create_service_entry("keystone", "identity", "Keystone Identity Service")
|
||||
"""
|
||||
create_tenant("admin")
|
||||
create_tenant(config("service-tenant"))
|
||||
# User is managed by ldap backend when using ldap identity
|
||||
if not (config('identity-backend') ==
|
||||
'ldap' and config('ldap-readonly')):
|
||||
passwd = get_admin_passwd()
|
||||
if passwd:
|
||||
create_user(config('admin-user'), passwd, tenant='admin')
|
||||
update_user_password(config('admin-user'), passwd)
|
||||
create_role(config('admin-role'), config('admin-user'),
|
||||
'admin')
|
||||
create_service_entry("keystone", "identity",
|
||||
"Keystone Identity Service")
|
||||
|
||||
for region in config('region').split():
|
||||
create_keystone_endpoint(public_ip=resolve_address(PUBLIC),
|
||||
service_port=config("service-port"),
|
||||
internal_ip=resolve_address(INTERNAL),
|
||||
admin_ip=resolve_address(ADMIN),
|
||||
auth_port=config("admin-port"),
|
||||
region=region)
|
||||
for region in config('region').split():
|
||||
create_keystone_endpoint(public_ip=resolve_address(PUBLIC),
|
||||
service_port=config("service-port"),
|
||||
internal_ip=resolve_address(INTERNAL),
|
||||
admin_ip=resolve_address(ADMIN),
|
||||
auth_port=config("admin-port"),
|
||||
region=region)
|
||||
|
||||
return _ensure_initial_admin(config)
|
||||
|
||||
|
||||
def endpoint_url(ip, port):
|
||||
@ -621,20 +665,357 @@ def get_service_password(service_username):
|
||||
return passwd
|
||||
|
||||
|
||||
def synchronize_ca():
|
||||
'''
|
||||
Broadcast service credentials to peers or consume those that have been
|
||||
broadcasted by peer, depending on hook context.
|
||||
'''
|
||||
if not eligible_leader(CLUSTER_RES):
|
||||
return
|
||||
log('Synchronizing CA to all peers.')
|
||||
if is_clustered():
|
||||
if config('https-service-endpoints') in ['True', 'true']:
|
||||
unison.sync_to_peers(peer_interface='cluster',
|
||||
paths=[SSL_DIR], user=SSH_USER, verbose=True)
|
||||
def ensure_permissions(path, user=None, group=None, perms=None):
"""Set chown and chmod for path.
|
||||
|
||||
CA = []
|
||||
Note that -1 for uid or gid results in no change.
|
||||
"""
|
||||
if user:
|
||||
uid = pwd.getpwnam(user).pw_uid
|
||||
else:
|
||||
uid = -1
|
||||
|
||||
if group:
|
||||
gid = grp.getgrnam(group).gr_gid
|
||||
else:
|
||||
gid = -1
|
||||
|
||||
os.chown(path, uid, gid)
|
||||
|
||||
if perms:
|
||||
os.chmod(path, perms)
|
||||
|
||||
|
||||
def check_peer_actions():
|
||||
"""Honour service action requests from sync master.
|
||||
|
||||
Check for service action request flags, perform the action then delete the
|
||||
flag.
|
||||
"""
|
||||
restart = relation_get(attribute='restart-services-trigger')
|
||||
if restart and os.path.isdir(SYNC_FLAGS_DIR):
|
||||
for flagfile in glob.glob(os.path.join(SYNC_FLAGS_DIR, '*')):
|
||||
flag = os.path.basename(flagfile)
|
||||
key = re.compile("^(.+)?\.(.+)?\.(.+)")
|
||||
res = re.search(key, flag)
|
||||
if res:
|
||||
source = res.group(1)
|
||||
service = res.group(2)
|
||||
action = res.group(3)
|
||||
else:
|
||||
key = re.compile("^(.+)?\.(.+)?")
|
||||
res = re.search(key, flag)
|
||||
source = res.group(1)
|
||||
action = res.group(2)
|
||||
|
||||
# Don't execute actions requested by this unit.
|
||||
if local_unit().replace('.', '-') != source:
|
||||
if action == 'restart':
|
||||
log("Running action='%s' on service '%s'" %
|
||||
(action, service), level=DEBUG)
|
||||
service_restart(service)
|
||||
elif action == 'start':
|
||||
log("Running action='%s' on service '%s'" %
|
||||
(action, service), level=DEBUG)
|
||||
service_start(service)
|
||||
elif action == 'stop':
|
||||
log("Running action='%s' on service '%s'" %
|
||||
(action, service), level=DEBUG)
|
||||
service_stop(service)
|
||||
elif action == 'update-ca-certificates':
|
||||
log("Running %s" % (action), level=DEBUG)
|
||||
subprocess.check_call(['update-ca-certificates'])
|
||||
else:
|
||||
log("Unknown action flag=%s" % (flag), level=WARNING)
|
||||
|
||||
try:
|
||||
os.remove(flagfile)
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def create_peer_service_actions(action, services):
|
||||
"""Mark remote services for action.
|
||||
|
||||
Default action is restart. These actions will be picked up by peer units
|
||||
e.g. we may need to restart services on peer units after certs have been
|
||||
synced.
|
||||
"""
|
||||
for service in services:
|
||||
flagfile = os.path.join(SYNC_FLAGS_DIR, '%s.%s.%s' %
|
||||
(local_unit().replace('/', '-'),
|
||||
service.strip(), action))
|
||||
log("Creating action %s" % (flagfile), level=DEBUG)
|
||||
write_file(flagfile, content='', owner=SSH_USER, group='keystone',
|
||||
perms=0o644)
|
||||
|
||||
|
||||
def create_peer_actions(actions):
|
||||
for action in actions:
|
||||
action = "%s.%s" % (local_unit().replace('/', '-'), action)
|
||||
flagfile = os.path.join(SYNC_FLAGS_DIR, action)
|
||||
log("Creating action %s" % (flagfile), level=DEBUG)
|
||||
write_file(flagfile, content='', owner=SSH_USER, group='keystone',
|
||||
perms=0o644)
|
||||
|
||||
|
||||
@retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError)
|
||||
def unison_sync(paths_to_sync):
|
||||
"""Do unison sync and retry a few times if it fails since peers may not be
|
||||
ready for sync.
|
||||
"""
|
||||
log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),
|
||||
level=INFO)
|
||||
keystone_gid = grp.getgrnam('keystone').gr_gid
|
||||
unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,
|
||||
user=SSH_USER, verbose=True, gid=keystone_gid,
|
||||
fatal=True)
|
||||
|
||||
|
||||
def get_ssl_sync_request_units():
|
||||
"""Get list of units that have requested to be synced.
|
||||
|
||||
NOTE: this must be called from cluster relation context.
|
||||
"""
|
||||
units = []
|
||||
for unit in related_units():
|
||||
settings = relation_get(unit=unit) or {}
|
||||
rkeys = settings.keys()
|
||||
key = re.compile("^ssl-sync-required-(.+)")
|
||||
for rkey in rkeys:
|
||||
res = re.search(key, rkey)
|
||||
if res:
|
||||
units.append(res.group(1))
|
||||
|
||||
return units
|
||||
|
||||
|
||||
def is_ssl_cert_master():
|
||||
"""Return True if this unit is ssl cert master."""
|
||||
master = None
|
||||
for rid in relation_ids('cluster'):
|
||||
master = relation_get(attribute='ssl-cert-master', rid=rid,
|
||||
unit=local_unit())
|
||||
|
||||
return master == local_unit()
|
||||
|
||||
|
||||
def ensure_ssl_cert_master(use_oldest_peer=False):
|
||||
"""Ensure that an ssl cert master has been elected.
|
||||
|
||||
Normally the cluster leader will take control but we allow for this to be
|
||||
ignored since this could be called before the cluster is ready.
|
||||
"""
|
||||
# Don't do anything if we are not in ssl/https mode
|
||||
if not (is_str_true(config('use-https')) or
|
||||
is_str_true(config('https-service-endpoints'))):
|
||||
log("SSL/HTTPS is NOT enabled", level=DEBUG)
|
||||
return False
|
||||
|
||||
if not peer_units():
|
||||
log("Not syncing certs since there are no peer units.", level=INFO)
|
||||
return False
|
||||
|
||||
if use_oldest_peer:
|
||||
elect = oldest_peer(peer_units())
|
||||
else:
|
||||
elect = is_elected_leader(CLUSTER_RES)
|
||||
|
||||
if elect:
|
||||
masters = []
|
||||
for rid in relation_ids('cluster'):
|
||||
for unit in related_units(rid):
|
||||
m = relation_get(rid=rid, unit=unit,
|
||||
attribute='ssl-cert-master')
|
||||
if m is not None:
|
||||
masters.append(m)
|
||||
|
||||
# We expect all peers to echo this setting
|
||||
if not masters or 'unknown' in masters:
|
||||
log("Notifying peers this unit is ssl-cert-master", level=INFO)
|
||||
for rid in relation_ids('cluster'):
|
||||
settings = {'ssl-cert-master': local_unit()}
|
||||
relation_set(relation_id=rid, relation_settings=settings)
|
||||
|
||||
# Return now and wait for cluster-relation-changed (peer_echo) for
|
||||
# sync.
|
||||
return False
|
||||
elif len(set(masters)) != 1 and local_unit() not in masters:
|
||||
log("Did not get consensus from peers on who is master (%s) - "
|
||||
"waiting for current master to release before self-electing" %
|
||||
(masters), level=INFO)
|
||||
return False
|
||||
|
||||
if not is_ssl_cert_master():
|
||||
log("Not ssl cert master - skipping sync", level=INFO)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def synchronize_ca(fatal=False):
|
||||
"""Broadcast service credentials to peers.
|
||||
|
||||
By default a failure to sync is logged but ignored; with fatal=True it raises an
|
||||
exception.
|
||||
|
||||
This function uses a relation setting 'ssl-cert-master' to get some
|
||||
leader stickiness while synchronisation is being carried out. This ensures
|
||||
that the last host to create and broadcast certificates has the option to
|
||||
complete actions before electing the new leader as sync master.
|
||||
"""
|
||||
paths_to_sync = [SYNC_FLAGS_DIR]
|
||||
|
||||
if is_str_true(config('https-service-endpoints')):
|
||||
log("Syncing all endpoint certs since https-service-endpoints=True",
|
||||
level=DEBUG)
|
||||
paths_to_sync.append(SSL_DIR)
|
||||
paths_to_sync.append(APACHE_SSL_DIR)
|
||||
paths_to_sync.append(CA_CERT_PATH)
|
||||
elif is_str_true(config('use-https')):
|
||||
log("Syncing keystone-endpoint certs since use-https=True",
|
||||
level=DEBUG)
|
||||
paths_to_sync.append(APACHE_SSL_DIR)
|
||||
paths_to_sync.append(CA_CERT_PATH)
|
||||
|
||||
if not paths_to_sync:
|
||||
log("Nothing to sync - skipping", level=DEBUG)
|
||||
return
|
||||
|
||||
if not os.path.isdir(SYNC_FLAGS_DIR):
|
||||
mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775)
|
||||
|
||||
# We need to restart peer apache services to ensure they have picked up
|
||||
# new ssl keys.
|
||||
create_peer_service_actions('restart', ['apache2'])
|
||||
create_peer_actions(['update-ca-certificates'])
|
||||
|
||||
# Format here needs to match that used when peers request sync
|
||||
synced_units = [unit.replace('/', '-') for unit in peer_units()]
|
||||
|
||||
retries = 3
|
||||
while True:
|
||||
hash1 = hashlib.sha256()
|
||||
for path in paths_to_sync:
|
||||
update_hash_from_path(hash1, path)
|
||||
|
||||
try:
|
||||
unison_sync(paths_to_sync)
|
||||
except:
|
||||
if fatal:
|
||||
raise
|
||||
else:
|
||||
log("Sync failed but fatal=False", level=INFO)
|
||||
return
|
||||
|
||||
hash2 = hashlib.sha256()
|
||||
for path in paths_to_sync:
|
||||
update_hash_from_path(hash2, path)
|
||||
|
||||
# Detect whether someone else has synced to this unit while we did our
|
||||
# transfer.
|
||||
if hash1.hexdigest() != hash2.hexdigest():
|
||||
retries -= 1
|
||||
if retries > 0:
|
||||
log("SSL dir contents changed during sync - retrying unison "
|
||||
"sync %s more times" % (retries), level=WARNING)
|
||||
else:
|
||||
log("SSL dir contents changed during sync - retries failed",
|
||||
level=ERROR)
|
||||
return {}
|
||||
else:
|
||||
break
|
||||
|
||||
hash = hash1.hexdigest()
|
||||
log("Sending restart-services-trigger=%s to all peers" % (hash),
|
||||
level=DEBUG)
|
||||
|
||||
log("Sync complete", level=DEBUG)
|
||||
return {'restart-services-trigger': hash,
|
||||
'ssl-synced-units': json.dumps(synced_units)}
|
||||
|
||||
|
||||
def update_hash_from_path(hash, path, recurse_depth=10):
|
||||
"""Recurse through path and update the provided hash for every file found.
|
||||
"""
|
||||
if not recurse_depth:
|
||||
log("Max recursion depth (%s) reached for update_hash_from_path() at "
|
||||
"path='%s' - not going any deeper" % (recurse_depth, path),
|
||||
level=WARNING)
|
||||
return
|
||||
|
||||
for p in glob.glob("%s/*" % path):
|
||||
if os.path.isdir(p):
|
||||
update_hash_from_path(hash, p, recurse_depth=recurse_depth - 1)
|
||||
else:
|
||||
with open(p, 'r') as fd:
|
||||
hash.update(fd.read())
|
||||
|
||||
|
||||
def synchronize_ca_if_changed(force=False, fatal=False):
"""Decorator to perform an ssl cert sync if the decorated function modifies the certs
|
||||
in any way.
|
||||
|
||||
If force is True a sync is done regardless.
|
||||
"""
|
||||
def inner_synchronize_ca_if_changed1(f):
|
||||
def inner_synchronize_ca_if_changed2(*args, **kwargs):
|
||||
# Only sync master can do sync. Ensure (a) we are not nested and
|
||||
# (b) a master is elected and we are it.
|
||||
acquired = SSL_SYNC_SEMAPHORE.acquire(blocking=0)
|
||||
try:
|
||||
if not acquired:
|
||||
log("Nested sync - ignoring", level=DEBUG)
|
||||
return f(*args, **kwargs)
|
||||
|
||||
if not ensure_ssl_cert_master():
|
||||
log("Not leader - ignoring sync", level=DEBUG)
|
||||
return f(*args, **kwargs)
|
||||
|
||||
peer_settings = {}
|
||||
if not force:
|
||||
ssl_dirs = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]
|
||||
|
||||
hash1 = hashlib.sha256()
|
||||
for path in ssl_dirs:
|
||||
update_hash_from_path(hash1, path)
|
||||
|
||||
ret = f(*args, **kwargs)
|
||||
|
||||
hash2 = hashlib.sha256()
|
||||
for path in ssl_dirs:
|
||||
update_hash_from_path(hash2, path)
|
||||
|
||||
if hash1.hexdigest() != hash2.hexdigest():
|
||||
log("SSL certs have changed - syncing peers",
|
||||
level=DEBUG)
|
||||
peer_settings = synchronize_ca(fatal=fatal)
|
||||
else:
|
||||
log("SSL certs have not changed - skipping sync",
|
||||
level=DEBUG)
|
||||
else:
|
||||
ret = f(*args, **kwargs)
|
||||
log("Doing forced ssl cert sync", level=DEBUG)
|
||||
peer_settings = synchronize_ca(fatal=fatal)
|
||||
|
||||
# If we are the sync master but not leader, ensure we have
|
||||
# relinquished master status.
|
||||
if not is_elected_leader(CLUSTER_RES):
|
||||
log("Re-electing ssl cert master.", level=INFO)
|
||||
peer_settings['ssl-cert-master'] = 'unknown'
|
||||
|
||||
if peer_settings:
|
||||
for rid in relation_ids('cluster'):
|
||||
relation_set(relation_id=rid,
|
||||
relation_settings=peer_settings)
|
||||
|
||||
return ret
|
||||
finally:
|
||||
SSL_SYNC_SEMAPHORE.release()
|
||||
|
||||
return inner_synchronize_ca_if_changed2
|
||||
|
||||
return inner_synchronize_ca_if_changed1
|
||||
|
||||
|
||||
def get_ca(user='keystone', group='keystone'):
|
||||
@ -642,22 +1023,32 @@ def get_ca(user='keystone', group='keystone'):
|
||||
Initialize a new CA object if one hasn't already been loaded.
|
||||
This will create a new CA or load an existing one.
|
||||
"""
|
||||
if not CA:
|
||||
if not ssl.CA_SINGLETON:
|
||||
if not os.path.isdir(SSL_DIR):
|
||||
os.mkdir(SSL_DIR)
|
||||
|
||||
d_name = '_'.join(SSL_CA_NAME.lower().split(' '))
|
||||
ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group,
|
||||
ca_dir=os.path.join(SSL_DIR,
|
||||
'%s_intermediate_ca' % d_name),
|
||||
root_ca_dir=os.path.join(SSL_DIR,
|
||||
'%s_root_ca' % d_name))
|
||||
|
||||
# SSL_DIR is synchronized via all peers over unison+ssh, need
|
||||
# to ensure permissions.
|
||||
subprocess.check_output(['chown', '-R', '%s.%s' % (user, group),
|
||||
'%s' % SSL_DIR])
|
||||
subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR])
|
||||
CA.append(ca)
|
||||
return CA[0]
|
||||
|
||||
# Ensure a master has been elected and prefer this unit. Note that we
|
||||
# prefer oldest peer as predicate since this action is normally only
|
||||
# performed once at deploy time when the oldest peer should be the
|
||||
# first to be ready.
|
||||
ensure_ssl_cert_master(use_oldest_peer=True)
|
||||
|
||||
ssl.CA_SINGLETON.append(ca)
|
||||
|
||||
return ssl.CA_SINGLETON[0]
|
||||
|
||||
|
||||
def relation_list(rid):
|
||||
@ -683,7 +1074,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
|
||||
https_cns = []
|
||||
if single.issubset(settings):
|
||||
# other end of relation advertised only one endpoint
|
||||
if 'None' in [v for k, v in settings.iteritems()]:
|
||||
if 'None' in settings.itervalues():
|
||||
# Some backend services advertise no endpoint but require a
|
||||
# hook execution to update auth strategy.
|
||||
relation_data = {}
|
||||
@ -699,7 +1090,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
|
||||
relation_data["auth_port"] = config('admin-port')
|
||||
relation_data["service_port"] = config('service-port')
|
||||
relation_data["region"] = config('region')
|
||||
if config('https-service-endpoints') in ['True', 'true']:
|
||||
if is_str_true(config('https-service-endpoints')):
|
||||
# Pass CA cert as client will need it to
|
||||
# verify https connections
|
||||
ca = get_ca(user=SSH_USER)
|
||||
@ -711,6 +1102,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
|
||||
for role in get_requested_roles(settings):
|
||||
log("Creating requested role: %s" % role)
|
||||
create_role(role)
|
||||
|
||||
peer_store_and_set(relation_id=relation_id,
|
||||
**relation_data)
|
||||
return
|
||||
@ -786,7 +1178,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
|
||||
if prefix:
|
||||
service_username = "%s%s" % (prefix, service_username)
|
||||
|
||||
if 'None' in [v for k, v in settings.iteritems()]:
|
||||
if 'None' in settings.itervalues():
|
||||
return
|
||||
|
||||
if not service_username:
|
||||
@ -838,7 +1230,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
|
||||
relation_data["auth_protocol"] = "http"
|
||||
relation_data["service_protocol"] = "http"
|
||||
# generate or get a new cert/key for service if set to manage certs.
|
||||
if config('https-service-endpoints') in ['True', 'true']:
|
||||
if is_str_true(config('https-service-endpoints')):
|
||||
ca = get_ca(user=SSH_USER)
|
||||
# NOTE(jamespage) may have multiple cns to deal with to iterate
|
||||
https_cns = set(https_cns)
|
||||
@ -853,6 +1245,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
|
||||
ca_bundle = ca.get_ca_bundle()
|
||||
relation_data['ca_cert'] = b64encode(ca_bundle)
|
||||
relation_data['https_keystone'] = 'True'
|
||||
|
||||
peer_store_and_set(relation_id=relation_id,
|
||||
**relation_data)
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
from mock import call, patch, MagicMock
|
||||
import os
|
||||
import json
|
||||
import uuid
|
||||
|
||||
from test_utils import CharmTestCase
|
||||
|
||||
@ -30,7 +31,6 @@ TO_PATCH = [
|
||||
'local_unit',
|
||||
'filter_installed_packages',
|
||||
'relation_ids',
|
||||
'relation_list',
|
||||
'relation_set',
|
||||
'relation_get',
|
||||
'related_units',
|
||||
@ -42,9 +42,10 @@ TO_PATCH = [
|
||||
'restart_on_change',
|
||||
# charmhelpers.contrib.openstack.utils
|
||||
'configure_installation_source',
|
||||
# charmhelpers.contrib.openstack.ip
|
||||
'resolve_address',
|
||||
# charmhelpers.contrib.hahelpers.cluster_utils
|
||||
'is_leader',
|
||||
'eligible_leader',
|
||||
'is_elected_leader',
|
||||
'get_hacluster_config',
|
||||
# keystone_utils
|
||||
'restart_map',
|
||||
@ -55,7 +56,7 @@ TO_PATCH = [
|
||||
'migrate_database',
|
||||
'ensure_initial_admin',
|
||||
'add_service_to_keystone',
|
||||
'synchronize_ca',
|
||||
'synchronize_ca_if_changed',
|
||||
'update_nrpe_config',
|
||||
# other
|
||||
'check_call',
|
||||
@ -160,8 +161,13 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
'Attempting to associate a postgresql database when there '
|
||||
'is already associated a mysql one')
|
||||
|
||||
@patch('keystone_utils.log')
|
||||
@patch('keystone_utils.ensure_ssl_cert_master')
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
def test_db_changed_missing_relation_data(self, configs):
|
||||
def test_db_changed_missing_relation_data(self, configs,
|
||||
mock_ensure_ssl_cert_master,
|
||||
mock_log):
|
||||
mock_ensure_ssl_cert_master.return_value = False
|
||||
configs.complete_contexts = MagicMock()
|
||||
configs.complete_contexts.return_value = []
|
||||
hooks.db_changed()
|
||||
@ -169,8 +175,13 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
'shared-db relation incomplete. Peer not ready?'
|
||||
)
|
||||
|
||||
@patch('keystone_utils.log')
|
||||
@patch('keystone_utils.ensure_ssl_cert_master')
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
def test_postgresql_db_changed_missing_relation_data(self, configs):
|
||||
def test_postgresql_db_changed_missing_relation_data(self, configs,
|
||||
mock_ensure_leader,
|
||||
mock_log):
|
||||
mock_ensure_leader.return_value = False
|
||||
configs.complete_contexts = MagicMock()
|
||||
configs.complete_contexts.return_value = []
|
||||
hooks.pgsql_db_changed()
|
||||
@ -192,9 +203,14 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
configs.write = MagicMock()
|
||||
hooks.pgsql_db_changed()
|
||||
|
||||
@patch('keystone_utils.log')
|
||||
@patch('keystone_utils.ensure_ssl_cert_master')
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
@patch.object(hooks, 'identity_changed')
|
||||
def test_db_changed_allowed(self, identity_changed, configs):
|
||||
def test_db_changed_allowed(self, identity_changed, configs,
|
||||
mock_ensure_ssl_cert_master,
|
||||
mock_log):
|
||||
mock_ensure_ssl_cert_master.return_value = False
|
||||
self.relation_ids.return_value = ['identity-service:0']
|
||||
self.related_units.return_value = ['unit/0']
|
||||
|
||||
@ -207,9 +223,13 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
relation_id='identity-service:0',
|
||||
remote_unit='unit/0')
|
||||
|
||||
@patch('keystone_utils.log')
|
||||
@patch('keystone_utils.ensure_ssl_cert_master')
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
@patch.object(hooks, 'identity_changed')
|
||||
def test_db_changed_not_allowed(self, identity_changed, configs):
|
||||
def test_db_changed_not_allowed(self, identity_changed, configs,
|
||||
mock_ensure_ssl_cert_master, mock_log):
|
||||
mock_ensure_ssl_cert_master.return_value = False
|
||||
self.relation_ids.return_value = ['identity-service:0']
|
||||
self.related_units.return_value = ['unit/0']
|
||||
|
||||
@ -220,9 +240,13 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
self.assertFalse(self.ensure_initial_admin.called)
|
||||
self.assertFalse(identity_changed.called)
|
||||
|
||||
@patch('keystone_utils.log')
|
||||
@patch('keystone_utils.ensure_ssl_cert_master')
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
@patch.object(hooks, 'identity_changed')
|
||||
def test_postgresql_db_changed(self, identity_changed, configs):
|
||||
def test_postgresql_db_changed(self, identity_changed, configs,
|
||||
mock_ensure_ssl_cert_master, mock_log):
|
||||
mock_ensure_ssl_cert_master.return_value = False
|
||||
self.relation_ids.return_value = ['identity-service:0']
|
||||
self.related_units.return_value = ['unit/0']
|
||||
|
||||
@ -235,6 +259,10 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
relation_id='identity-service:0',
|
||||
remote_unit='unit/0')
|
||||
|
||||
@patch('keystone_utils.log')
|
||||
@patch('keystone_utils.ensure_ssl_cert_master')
|
||||
@patch.object(hooks, 'peer_units')
|
||||
@patch.object(hooks, 'ensure_permissions')
|
||||
@patch.object(hooks, 'admin_relation_changed')
|
||||
@patch.object(hooks, 'cluster_joined')
|
||||
@patch.object(unison, 'ensure_user')
|
||||
@ -245,11 +273,15 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
def test_config_changed_no_openstack_upgrade_leader(
|
||||
self, configure_https, identity_changed,
|
||||
configs, get_homedir, ensure_user, cluster_joined,
|
||||
admin_relation_changed):
|
||||
admin_relation_changed, ensure_permissions, mock_peer_units,
|
||||
mock_ensure_ssl_cert_master, mock_log):
|
||||
self.openstack_upgrade_available.return_value = False
|
||||
self.eligible_leader.return_value = True
|
||||
self.relation_ids.return_value = ['dummyid:0']
|
||||
self.relation_list.return_value = ['unit/0']
|
||||
self.is_elected_leader.return_value = True
|
||||
# avoid having to mock syncer
|
||||
mock_ensure_ssl_cert_master.return_value = False
|
||||
mock_peer_units.return_value = []
|
||||
self.relation_ids.return_value = ['identity-service:0']
|
||||
self.related_units.return_value = ['unit/0']
|
||||
|
||||
hooks.config_changed()
|
||||
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
|
||||
@ -264,10 +296,13 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
self.log.assert_called_with(
|
||||
'Firing identity_changed hook for all related services.')
|
||||
identity_changed.assert_called_with(
|
||||
relation_id='dummyid:0',
|
||||
relation_id='identity-service:0',
|
||||
remote_unit='unit/0')
|
||||
admin_relation_changed.assert_called_with('dummyid:0')
|
||||
admin_relation_changed.assert_called_with('identity-service:0')
|
||||
|
||||
@patch('keystone_utils.log')
|
||||
@patch('keystone_utils.ensure_ssl_cert_master')
|
||||
@patch.object(hooks, 'ensure_permissions')
|
||||
@patch.object(hooks, 'cluster_joined')
|
||||
@patch.object(unison, 'ensure_user')
|
||||
@patch.object(unison, 'get_homedir')
|
||||
@ -276,9 +311,12 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
@patch.object(hooks, 'configure_https')
|
||||
def test_config_changed_no_openstack_upgrade_not_leader(
|
||||
self, configure_https, identity_changed,
|
||||
configs, get_homedir, ensure_user, cluster_joined):
|
||||
configs, get_homedir, ensure_user, cluster_joined,
|
||||
ensure_permissions, mock_ensure_ssl_cert_master,
|
||||
mock_log):
|
||||
self.openstack_upgrade_available.return_value = False
|
||||
self.eligible_leader.return_value = False
|
||||
self.is_elected_leader.return_value = False
|
||||
mock_ensure_ssl_cert_master.return_value = False
|
||||
|
||||
hooks.config_changed()
|
||||
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
|
||||
@ -292,6 +330,10 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
self.assertFalse(self.ensure_initial_admin.called)
|
||||
self.assertFalse(identity_changed.called)
|
||||
|
||||
@patch('keystone_utils.log')
|
||||
@patch('keystone_utils.ensure_ssl_cert_master')
|
||||
@patch.object(hooks, 'peer_units')
|
||||
@patch.object(hooks, 'ensure_permissions')
|
||||
@patch.object(hooks, 'admin_relation_changed')
|
||||
@patch.object(hooks, 'cluster_joined')
|
||||
@patch.object(unison, 'ensure_user')
|
||||
@ -302,11 +344,16 @@ class KeystoneRelationTests(CharmTestCase):
def test_config_changed_with_openstack_upgrade(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user, cluster_joined,
admin_relation_changed):
admin_relation_changed,
ensure_permissions, mock_peer_units, mock_ensure_ssl_cert_master,
mock_log):
self.openstack_upgrade_available.return_value = True
self.eligible_leader.return_value = True
self.relation_ids.return_value = ['dummyid:0']
self.relation_list.return_value = ['unit/0']
self.is_elected_leader.return_value = True
# avoid having to mock syncer
mock_ensure_ssl_cert_master.return_value = False
mock_peer_units.return_value = []
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']

hooks.config_changed()
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
@ -323,25 +370,33 @@ class KeystoneRelationTests(CharmTestCase):
self.log.assert_called_with(
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='dummyid:0',
relation_id='identity-service:0',
remote_unit='unit/0')
admin_relation_changed.assert_called_with('dummyid:0')
admin_relation_changed.assert_called_with('identity-service:0')

@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'hashlib')
@patch.object(hooks, 'send_notifications')
def test_identity_changed_leader(self, mock_send_notifications,
mock_hashlib):
self.eligible_leader.return_value = True
mock_hashlib, mock_ensure_ssl_cert_master,
mock_log):
mock_ensure_ssl_cert_master.return_value = False
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
self.add_service_to_keystone.assert_called_with(
'identity-service:0',
'unit/0')
self.assertTrue(self.synchronize_ca.called)

def test_identity_changed_no_leader(self):
self.eligible_leader.return_value = False
@patch.object(hooks, 'local_unit')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
def test_identity_changed_no_leader(self, mock_ensure_ssl_cert_master,
mock_log, mock_local_unit):
mock_ensure_ssl_cert_master.return_value = False
mock_local_unit.return_value = 'unit/0'
self.is_elected_leader.return_value = False
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
@ -349,23 +404,44 @@ class KeystoneRelationTests(CharmTestCase):
self.log.assert_called_with(
'Deferring identity_changed() to service leader.')

@patch.object(hooks, 'local_unit')
@patch.object(hooks, 'peer_units')
@patch.object(unison, 'ssh_authorized_peers')
def test_cluster_joined(self, ssh_authorized_peers):
def test_cluster_joined(self, ssh_authorized_peers, mock_peer_units,
mock_local_unit):
mock_local_unit.return_value = 'unit/0'
mock_peer_units.return_value = ['unit/0']
hooks.cluster_joined()
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)

@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'peer_units')
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.synchronize_ca')
@patch.object(hooks, 'check_peer_actions')
@patch.object(unison, 'ssh_authorized_peers')
@patch.object(hooks, 'CONFIGS')
def test_cluster_changed(self, configs, ssh_authorized_peers):
def test_cluster_changed(self, configs, ssh_authorized_peers,
check_peer_actions, mock_synchronize_ca,
mock_ensure_ssl_cert_master,
mock_log, mock_peer_units,
mock_is_ssl_cert_master):
mock_is_ssl_cert_master.return_value = False
mock_peer_units.return_value = ['unit/0']
mock_ensure_ssl_cert_master.return_value = False
self.is_elected_leader.return_value = False
self.relation_get.return_value = {'foo_passwd': '123',
'identity-service:16_foo': 'bar'}
hooks.cluster_changed()
self.peer_echo.assert_called_with(includes=['_passwd',
'identity-service:'])
self.peer_echo.assert_called_with(includes=['foo_passwd',
'identity-service:16_foo'])
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertFalse(mock_synchronize_ca.called)
self.assertTrue(configs.write_all.called)

def test_ha_joined(self):
@ -440,34 +516,50 @@ class KeystoneRelationTests(CharmTestCase):
}
self.relation_set.assert_called_with(**args)

@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.synchronize_ca')
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_not_clustered_not_leader(self, configs):
def test_ha_relation_changed_not_clustered_not_leader(self, configs,
mock_synchronize_ca,
mock_is_master,
mock_log):
mock_is_master.return_value = False
self.relation_get.return_value = False
self.is_leader.return_value = False
self.is_elected_leader.return_value = False

hooks.ha_changed()
self.assertTrue(configs.write_all.called)
self.assertFalse(mock_synchronize_ca.called)

@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_clustered_leader(
self, configs, identity_changed):
def test_ha_relation_changed_clustered_leader(self, configs,
identity_changed,
mock_ensure_ssl_cert_master,
mock_log):
mock_ensure_ssl_cert_master.return_value = False
self.relation_get.return_value = True
self.is_leader.return_value = True
self.is_elected_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']

hooks.ha_changed()
self.assertTrue(configs.write_all.called)
self.log.assert_called_with(
'Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')

@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'CONFIGS')
def test_configure_https_enable(self, configs):
def test_configure_https_enable(self, configs, mock_ensure_ssl_cert_master,
mock_log):
mock_ensure_ssl_cert_master.return_value = False
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['https']
configs.write = MagicMock()
@ -477,8 +569,13 @@ class KeystoneRelationTests(CharmTestCase):
cmd = ['a2ensite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd)

@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'CONFIGS')
def test_configure_https_disable(self, configs):
def test_configure_https_disable(self, configs,
mock_ensure_ssl_cert_master,
mock_log):
mock_ensure_ssl_cert_master.return_value = False
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['']
configs.write = MagicMock()
@ -488,30 +585,61 @@ class KeystoneRelationTests(CharmTestCase):
cmd = ['a2dissite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd)

@patch('keystone_utils.log')
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.is_elected_leader')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.update_hash_from_path')
@patch('keystone_utils.synchronize_ca')
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_leader(self, ssh_authorized_peers):
self.eligible_leader.return_value = True
def test_upgrade_charm_leader(self, ssh_authorized_peers,
mock_synchronize_ca,
mock_update_hash_from_path,
mock_ensure_ssl_cert_master,
mock_is_elected_leader,
mock_relation_ids,
mock_log):
mock_is_elected_leader.return_value = False
mock_relation_ids.return_value = []
mock_ensure_ssl_cert_master.return_value = True
# Ensure always returns diff
mock_update_hash_from_path.side_effect = \
lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))

self.is_elected_leader.return_value = True
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertTrue(mock_synchronize_ca.called)
self.log.assert_called_with(
'Cluster leader - ensuring endpoint configuration'
' is up to date')
'Firing identity_changed hook for all related services.')
self.assertTrue(self.ensure_initial_admin.called)

@patch('keystone_utils.log')
@patch('keystone_utils.relation_ids')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch('keystone_utils.update_hash_from_path')
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_not_leader(self, ssh_authorized_peers):
self.eligible_leader.return_value = False
def test_upgrade_charm_not_leader(self, ssh_authorized_peers,
mock_update_hash_from_path,
mock_ensure_ssl_cert_master,
mock_relation_ids,
mock_log):
mock_relation_ids.return_value = []
mock_ensure_ssl_cert_master.return_value = False
# Ensure always returns diff
mock_update_hash_from_path.side_effect = \
lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))

self.is_elected_leader.return_value = False
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertFalse(self.log.called)
self.assertFalse(self.ensure_initial_admin.called)
@ -26,9 +26,8 @@ TO_PATCH = [
'get_os_codename_install_source',
'grant_role',
'configure_installation_source',
'eligible_leader',
'is_elected_leader',
'https',
'is_clustered',
'peer_store_and_set',
'service_stop',
'service_start',
@ -115,7 +114,7 @@ class TestKeystoneUtils(CharmTestCase):
self, migrate_database, determine_packages, configs):
self.test_config.set('openstack-origin', 'precise')
determine_packages.return_value = []
self.eligible_leader.return_value = True
self.is_elected_leader.return_value = True

utils.do_openstack_upgrade(configs)
@ -202,7 +201,6 @@ class TestKeystoneUtils(CharmTestCase):
self.resolve_address.return_value = '10.0.0.3'
self.test_config.set('admin-port', 80)
self.test_config.set('service-port', 81)
self.is_clustered.return_value = False
self.https.return_value = False
self.test_config.set('https-service-endpoints', 'False')
self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'