Install cron job to flush keystone tokens.

This change adds a cron job definition to flush expired keystone tokens
once every hour. Without it, the keystone token table grows without
bound, which can exhaust storage and degrade database performance in
production environments.

This change introduces a new templated keystone-token-flush cron job,
which runs the keystone-manage token_flush command as the keystone
user once per hour. The job honors the use-syslog setting, sending the
command's output either to the keystone-token-flush.log file or to
syslog via the logger command.

Only the Juju service leader has the cron job active, in order to
prevent multiple units from running token_flush concurrently.

Change-Id: I21be3b23a8fe66b67fba0654ce498d62b3afc2ac
Closes-Bug: #1467832
Billy Olsen 2016-03-06 12:19:47 -07:00
parent 86f3adbc62
commit 55274a7867
7 changed files with 70 additions and 2 deletions
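For illustration, the rendered /etc/cron.d/keystone-token-flush would look roughly like this (a sketch derived from the template added below; exact whitespace may differ). On the leader with use-syslog disabled:

0 * * * * keystone /usr/bin/keystone-manage token_flush >> /var/log/keystone/keystone-token-flush.log 2>&1

and with use-syslog enabled, output is piped to syslog instead:

0 * * * * keystone /usr/bin/keystone-manage token_flush 2>&1 | logger -t keystone-token-flush

On non-leader units the rendered file contains only a comment, so the job is inert there.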

hooks/keystone_context.py

@@ -12,8 +12,10 @@ from charmhelpers.core.host import (
 from charmhelpers.contrib.openstack import context
 from charmhelpers.contrib.hahelpers.cluster import (
+    DC_RESOURCE_NAME,
     determine_apache_port,
     determine_api_port,
+    is_elected_leader,
 )
 from charmhelpers.core.hookenv import (
@@ -264,3 +266,12 @@ class KeystoneLoggingContext(context.OSContextGenerator):
             ctxt['log_level'] = None
         return ctxt
+
+
+class TokenFlushContext(context.OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {
+            'token_flush': is_elected_leader(DC_RESOURCE_NAME)
+        }
+        return ctxt
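As a minimal sketch of how this context feeds the cron template (assuming only stock jinja2; the charm itself renders through charm-helpers' OSConfigRenderer rather than calling jinja2 directly, and this trimmed-down template is illustrative):

# Renders a reduced keystone-token-flush template with the dict produced
# by TokenFlushContext, to show the leader/non-leader split.
from jinja2 import Template

CRON_TEMPLATE = """\
{% if token_flush -%}
0 * * * * keystone /usr/bin/keystone-manage token_flush >> /var/log/keystone/keystone-token-flush.log 2>&1
{% else -%}
# Current unit is not the leader unit. Token flush will be managed by the leader unit.
{% endif -%}
"""

print(Template(CRON_TEMPLATE).render(token_flush=True))   # leader: active cron entry
print(Template(CRON_TEMPLATE).render(token_flush=False))  # non-leader: comment only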

hooks/keystone_hooks.py

@@ -66,6 +66,7 @@ from keystone_utils import (
     CLUSTER_RES,
     KEYSTONE_CONF,
     POLICY_JSON,
+    TOKEN_FLUSH_CRON_FILE,
     SSH_USER,
     setup_ipv6,
     send_notifications,
@@ -492,12 +493,25 @@ def cluster_changed():
     CONFIGS.write_all()
 
 
+@hooks.hook('leader-elected')
+def leader_elected():
+    log('Unit has been elected leader.', level=DEBUG)
+    # When the local unit has been elected the leader, update the cron jobs
+    # to ensure that the cron jobs are active on this unit.
+    CONFIGS.write(TOKEN_FLUSH_CRON_FILE)
+
+
 @hooks.hook('leader-settings-changed')
 def leader_settings_changed():
+    # Since minions are notified of a regime change via the
+    # leader-settings-changed hook, rewrite the token flush cron job to make
+    # sure only the leader is running the cron job.
+    CONFIGS.write(TOKEN_FLUSH_CRON_FILE)
+
     log('Firing identity_changed hook for all related services.')
     for rid in relation_ids('identity-service'):
         for unit in related_units(rid):
             identity_changed(relation_id=rid, remote_unit=unit)
 
 
 @hooks.hook('ha-relation-joined')

hooks/keystone_utils.py

@@ -188,6 +188,7 @@ SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]
 ADMIN_DOMAIN = 'admin_domain'
 DEFAULT_DOMAIN = 'Default'
 POLICY_JSON = '/etc/keystone/policy.json'
+TOKEN_FLUSH_CRON_FILE = '/etc/cron.d/keystone-token-flush'
 
 BASE_RESOURCE_MAP = OrderedDict([
     (KEYSTONE_CONF, {
@@ -221,6 +222,11 @@ BASE_RESOURCE_MAP = OrderedDict([
         'contexts': [keystone_context.KeystoneContext()],
         'services': BASE_SERVICES,
     }),
+    (TOKEN_FLUSH_CRON_FILE, {
+        'contexts': [keystone_context.TokenFlushContext(),
+                     context.SyslogContext()],
+        'services': [],
+    }),
 ])
 
 valid_services = {
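For context, the CONFIGS object used in the hooks above is built from this resource map. A minimal sketch of the conventional charm-helpers wiring follows (simplified from the charm's actual register_configs(); the release string here is an assumption, the charm derives it from the installed packages):

from charmhelpers.contrib.openstack import templating

def register_configs():
    # OSConfigRenderer looks up each target path's template (by basename)
    # in templates/ and renders it with the listed context generators
    # whenever configs.write() is called for that path.
    release = 'mitaka'  # assumption for illustration only
    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release=release)
    for cfg, rscs in BASE_RESOURCE_MAP.items():
        configs.register(cfg, rscs['contexts'])
    return configs

CONFIGS.write(TOKEN_FLUSH_CRON_FILE) then re-renders just /etc/cron.d/keystone-token-flush, flipping it active or inert according to leadership.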

hooks/leader-elected (new symbolic link)

@@ -0,0 +1 @@
+keystone_hooks.py
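The symlink works because Juju invokes hooks by file name and charm-helpers dispatches on the invoked name; a condensed sketch of the idiom already used in keystone_hooks.py:

import sys
from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

hooks = Hooks()

@hooks.hook('leader-elected')
def leader_elected():
    ...

if __name__ == '__main__':
    try:
        # Dispatches to the function registered for the invoked hook name.
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))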

templates/keystone-token-flush (new file)

@@ -0,0 +1,10 @@
+# Purge expired tokens from the keystone database hourly, per OpenStack installation guide.
+{% if token_flush -%}
+{% if use_syslog -%}
+0 * * * * keystone /usr/bin/keystone-manage token_flush 2>&1 | logger -t keystone-token-flush
+{% else -%}
+0 * * * * keystone /usr/bin/keystone-manage token_flush >> /var/log/keystone/keystone-token-flush.log 2>&1
+{% endif -%}
+{% else -%}
+# Current unit is not the leader unit. Token flush will be managed by the leader unit.
+{% endif -%}
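As a quick manual sanity check on a deployed leader unit (a sketch; paths taken from the template above), one can inspect the rendered file and trigger a flush by hand:

cat /etc/cron.d/keystone-token-flush
sudo -u keystone /usr/bin/keystone-manage token_flush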

unit_tests/test_keystone_contexts.py

@@ -168,3 +168,13 @@ class TestKeystoneContexts(CharmTestCase):
         mock_config.return_value = None
         self.assertEqual({'log_level': None}, ctxt())
+
+    @patch.object(context, 'is_elected_leader')
+    def test_token_flush_context(self, mock_is_elected_leader):
+        ctxt = context.TokenFlushContext()
+
+        mock_is_elected_leader.return_value = False
+        self.assertEqual({'token_flush': False}, ctxt())
+
+        mock_is_elected_leader.return_value = True
+        self.assertEqual({'token_flush': True}, ctxt())

unit_tests/test_keystone_hooks.py

@@ -740,6 +740,22 @@ class KeystoneRelationTests(CharmTestCase):
         self.assertFalse(mock_synchronize_ca.called)
         self.assertTrue(configs.write_all.called)
 
+    @patch.object(hooks.CONFIGS, 'write')
+    def test_leader_elected(self, mock_write):
+        hooks.leader_elected()
+        mock_write.assert_has_calls([call(utils.TOKEN_FLUSH_CRON_FILE)])
+
+    @patch.object(hooks.CONFIGS, 'write')
+    @patch.object(hooks, 'identity_changed')
+    def test_leader_settings_changed(self, mock_identity_changed,
+                                     mock_write):
+        self.relation_ids.return_value = ['identity:1']
+        self.related_units.return_value = ['keystone/1']
+        hooks.leader_settings_changed()
+        mock_write.assert_has_calls([call(utils.TOKEN_FLUSH_CRON_FILE)])
+        exp = [call(relation_id='identity:1', remote_unit='keystone/1')]
+        mock_identity_changed.assert_has_calls(exp)
+
     def test_ha_joined(self):
         self.get_hacluster_config.return_value = {
             'vip': '10.10.10.10',