[hopem,r=gnuoy]

Fixes ssl disable. Allows disabling of use-https and https-service-endpoints.

Use the '__null__' value to flush out peer relation settings that need to be unset
when forwarded to other relations. This fixes ssl disable by ensuring that
peer settings are correctly forwarded to endpoint relations.

Closes-Bug: 1427906
Edward Hope-Morley 2015-03-10 15:06:40 +00:00
commit a831a81594
5 changed files with 84 additions and 71 deletions
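
For illustration only, a minimal standalone sketch of the '__null__' approach described in the commit message (plain dicts stand in for the real juju relation calls, which are shown only as comments): settings destined for the peer (cluster) relation keep the marker so they can later be flushed, while copies forwarded to endpoint relations have the marker converted to None, which relation_set treats as "unset this key".

NULL = '__null__'

def filter_null(settings, null=NULL):
    """Convert the null marker to None so that relation_set() unsets the key."""
    return dict((k, None if v == null else v) for k, v in settings.items())

# Disabling ssl means previously published cert settings must be flushed.
relation_data = {
    'service_password': 'secret',
    'ssl_cert': NULL,
    'ssl_key': NULL,
    'ca_cert': NULL,
}

# Stored on the peer (cluster) relation with the marker intact, e.g.
#   peer_store_and_set(relation_id=relation_id, **relation_data)
# but filtered before being set on the identity-service relation:
endpoint_data = filter_null(relation_data)
assert endpoint_data['ssl_cert'] is None
assert endpoint_data['service_password'] == 'secret'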

View File

@@ -72,6 +72,7 @@ from keystone_utils import (
is_db_ready,
clear_ssl_synced_units,
is_db_initialised,
filter_null,
)
from charmhelpers.contrib.hahelpers.cluster import (
@@ -87,7 +88,6 @@ from charmhelpers.contrib.peerstorage import (
)
from charmhelpers.contrib.openstack.ip import (
ADMIN,
PUBLIC,
resolve_address,
)
from charmhelpers.contrib.network.ip import (
@@ -154,7 +154,7 @@ def config_changed():
for rid in relation_ids('identity-admin'):
admin_relation_changed(rid)
# Ensure sync request is sent out (needed for upgrade to ssl from non-ssl)
# Ensure sync request is sent out (needed for any/all ssl change)
send_ssl_sync_request()
for r_id in relation_ids('ha'):
@@ -297,6 +297,8 @@ def identity_changed(relation_id=None, remote_unit=None):
# with the info dies the settings die with it Bug# 1355848
for rel_id in relation_ids('identity-service'):
peerdb_settings = peer_retrieve_by_prefix(rel_id)
# Ensure the null'd settings are unset in the relation.
peerdb_settings = filter_null(peerdb_settings)
if 'service_password' in peerdb_settings:
relation_set(relation_id=rel_id, **peerdb_settings)
log('Deferring identity_changed() to service leader.')
@@ -323,22 +325,31 @@ def send_ssl_sync_request():
if bool_from_string(config('https-service-endpoints')):
count += 2
if count:
key = 'ssl-sync-required-%s' % (unit)
settings = {key: count}
prev = 0
rid = None
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_prev = relation_get(rid=rid, unit=unit, attribute=key) or 0
if _prev and _prev > prev:
prev = _prev
key = 'ssl-sync-required-%s' % (unit)
settings = {key: count}
if rid and prev < count:
clear_ssl_synced_units()
log("Setting %s=%s" % (key, count), level=DEBUG)
# If all ssl is disabled ensure this is set to 0 so that cluster hook runs
# and endpoints are updated.
if not count:
log("Setting %s=%s" % (key, count), level=DEBUG)
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, relation_settings=settings)
return
prev = 0
rid = None
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_prev = relation_get(rid=rid, unit=unit, attribute=key) or 0
if _prev and _prev > prev:
prev = _prev
if rid and prev < count:
clear_ssl_synced_units()
log("Setting %s=%s" % (key, count), level=DEBUG)
relation_set(relation_id=rid, relation_settings=settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined():
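
A simplified sketch of what the reworked send_ssl_sync_request() does (the use-https weighting of 1 is an assumption based on the charm's existing behaviour; only the https-service-endpoints increment of 2 is visible in the hunk above): the sync counter is non-zero whenever any ssl option is enabled, and when everything is disabled a 0 is still written to the cluster relation so peers get a relation-changed event and refresh their endpoints.

def ssl_sync_count(use_https, https_service_endpoints):
    # use-https assumed to contribute 1; https-service-endpoints contributes 2
    # as shown in the hunk above.
    count = 0
    if use_https:
        count += 1
    if https_service_endpoints:
        count += 2
    return count

# All ssl disabled: 0 is still published so the cluster hook fires.
assert ssl_sync_count(False, False) == 0
# Endpoints-only https:
assert ssl_sync_count(False, True) == 2
# Both options enabled:
assert ssl_sync_count(True, True) == 3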
@@ -364,37 +375,6 @@ def cluster_joined():
send_ssl_sync_request()
def apply_echo_filters(settings, echo_whitelist):
"""Filter settings to be peer_echo'ed.
We may have received some data that we don't want to re-echo so filter
out unwanted keys and provide overrides.
Returns:
tuple(filtered list of keys to be echoed, overrides for keys omitted)
"""
filtered = []
overrides = {}
for key in settings.iterkeys():
for ekey in echo_whitelist:
if ekey in key:
if ekey == 'identity-service:':
auth_host = resolve_address(ADMIN)
service_host = resolve_address(PUBLIC)
if (key.endswith('auth_host') and
settings[key] != auth_host):
overrides[key] = auth_host
continue
elif (key.endswith('service_host') and
settings[key] != service_host):
overrides[key] = service_host
continue
filtered.append(key)
return filtered, overrides
@hooks.hook('cluster-relation-changed',
'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True)
@@ -403,16 +383,11 @@ def cluster_changed():
group='juju_keystone',
peer_interface='cluster',
ensure_local_user=True)
settings = relation_get()
# NOTE(jamespage) re-echo passwords for peer storage
echo_whitelist, overrides = \
apply_echo_filters(settings, ['_passwd', 'identity-service:',
'ssl-cert-master', 'db-initialised'])
log("Peer echo overrides: %s" % (overrides), level=DEBUG)
relation_set(**overrides)
if echo_whitelist:
log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
peer_echo(includes=echo_whitelist)
echo_whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master',
'db-initialised']
log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
peer_echo(includes=echo_whitelist)
check_peer_actions()
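
The apply_echo_filters() helper is dropped in favour of always echoing a fixed whitelist. Assuming the charmhelpers peer_echo(includes=...) behaviour of matching each entry as a substring of the peer relation keys (which is how the whitelist entries are written), the effect is roughly:

echo_whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master',
                  'db-initialised']

def is_echoed(key, includes=echo_whitelist):
    # Substring match, mirroring how the removed apply_echo_filters()
    # compared whitelist entries against relation keys.
    return any(entry in key for entry in includes)

assert is_echoed('foo_passwd')
assert is_echoed('identity-service:16_foo')
assert not is_echoed('private-address')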

View File

@@ -230,6 +230,25 @@ valid_services = {
}
def filter_null(settings, null='__null__'):
"""Replace null values with None in provided settings dict.
When storing values in the peer relation, it might be necessary at some
future point to flush these values. We therefore need to use a real
(i.e. neither None nor an empty string) value to represent an unset setting. This value
then needs to be converted to None when applying to a non-cluster relation
so that the value is actually unset.
"""
filtered = {}
for k, v in settings.iteritems():
if v == null:
filtered[k] = None
else:
filtered[k] = v
return filtered
def resource_map():
"""Dynamically generate a map of resources that will be managed for a
single hook execution.
@@ -1294,6 +1313,9 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
# we return a token, information about our API endpoints, and the generated
# service credentials
service_tenant = config('service-tenant')
# NOTE(dosaboy): we use __null__ to represent settings that are to be
# routed to relations via the cluster relation and set to None.
relation_data = {
"admin_token": token,
"service_host": resolve_address(PUBLIC),
@@ -1304,10 +1326,10 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
"service_password": service_password,
"service_tenant": service_tenant,
"service_tenant_id": manager.resolve_tenant_id(service_tenant),
"https_keystone": "False",
"ssl_cert": "",
"ssl_key": "",
"ca_cert": "",
"https_keystone": '__null__',
"ssl_cert": '__null__',
"ssl_key": '__null__',
"ca_cert": '__null__',
"auth_protocol": protocol,
"service_protocol": protocol,
}
@@ -1331,7 +1353,12 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
relation_data['ca_cert'] = b64encode(ca_bundle)
relation_data['https_keystone'] = 'True'
peer_store_and_set(relation_id=relation_id, **relation_data)
# NOTE(dosaboy): '__null__' settings are for the peer relation only so that
# settings can be flushed, so we filter them out for non-peer relations.
filtered = filter_null(relation_data)
relation_set(relation_id=relation_id, **filtered)
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, **relation_data)
def ensure_valid_service(service):

View File

@@ -253,7 +253,6 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
'auth_port': '35357',
'auth_protocol': 'http',
'private-address': u.valid_ip,
'https_keystone': 'False',
'auth_host': u.valid_ip,
'service_username': 'cinder',
'service_tenant_id': u.not_null,

View File

@@ -273,6 +273,7 @@ class KeystoneRelationTests(CharmTestCase):
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'send_ssl_sync_request')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'peer_units')
@@ -289,6 +290,7 @@ class KeystoneRelationTests(CharmTestCase):
configs, get_homedir, ensure_user, cluster_joined,
admin_relation_changed, ensure_permissions, mock_peer_units,
mock_is_db_ready, mock_is_db_initialised,
mock_send_ssl_sync_request,
mock_ensure_ssl_cert_master, mock_log):
mock_is_db_initialised.return_value = True
mock_is_db_ready.return_value = True
@@ -348,6 +350,7 @@ class KeystoneRelationTests(CharmTestCase):
@patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'send_ssl_sync_request')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'peer_units')
@@ -368,6 +371,7 @@ class KeystoneRelationTests(CharmTestCase):
mock_peer_units,
mock_is_db_ready,
mock_is_db_initialised,
mock_send_ssl_sync_request,
mock_ensure_ssl_cert_master,
mock_log):
mock_is_db_ready.return_value = True
@@ -462,11 +466,10 @@ class KeystoneRelationTests(CharmTestCase):
mock_peer_units.return_value = ['unit/0']
mock_ensure_ssl_cert_master.return_value = False
self.is_elected_leader.return_value = False
self.relation_get.return_value = {'foo_passwd': '123',
'identity-service:16_foo': 'bar'}
hooks.cluster_changed()
self.peer_echo.assert_called_with(includes=['foo_passwd',
'identity-service:16_foo'])
whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master',
'db-initialised']
self.peer_echo.assert_called_with(includes=whitelist)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)

View File

@@ -210,6 +210,7 @@ class TestKeystoneUtils(CharmTestCase):
self.https.return_value = False
self.test_config.set('https-service-endpoints', 'False')
self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
self.relation_ids.return_value = ['cluster/0']
mock_keystone = MagicMock()
mock_keystone.resolve_tenant_id.return_value = 'tenant_id'
@@ -239,15 +240,23 @@ class TestKeystoneUtils(CharmTestCase):
'auth_port': 80, 'service_username': 'keystone',
'service_password': 'password',
'service_tenant': 'tenant',
'https_keystone': 'False',
'ssl_cert': '', 'ssl_key': '',
'ca_cert': '', 'auth_host': '10.0.0.3',
'https_keystone': '__null__',
'ssl_cert': '__null__', 'ssl_key': '__null__',
'ca_cert': '__null__', 'auth_host': '10.0.0.3',
'service_host': '10.0.0.3',
'auth_protocol': 'http', 'service_protocol': 'http',
'service_tenant_id': 'tenant_id'}
self.peer_store_and_set.assert_called_with(
relation_id=relation_id,
**relation_data)
filtered = {}
for k, v in relation_data.iteritems():
if v == '__null__':
filtered[k] = None
else:
filtered[k] = v
call1 = call(relation_id=relation_id, **filtered)
call2 = call(relation_id='cluster/0', **relation_data)
self.relation_set.assert_has_calls([call1, call2])
@patch.object(utils, 'ensure_valid_service')
@patch.object(utils, 'add_endpoint')