Implement caching for Tokens and Token Validation

Token caching is implemented in this patchset, building on the existing
keystone caching layer (which uses dogpile.cache).

This patchset includes basic token caching as the first application of
this caching layer. The following methods are cached (a minimal sketch
of the caching pattern follows the list):
    * token_api.get_token,
    * token_api.list_revoked_tokens
    * token_provider_api.validate_v3_token
    * token_provider_api.check_v3_token
    * token_provider_api.check_v2_token
    * token_provider_api.validate_v2_token
    * token_provider_api.validate_token
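
As a rough illustration of the caching pattern applied to these methods,
the sketch below wraps a lookup method with a dogpile.cache region. The
region setup, memory backend, and TokenManager/_lookup_token names are
stand-ins for this sketch, not keystone's actual module layout.

    # Minimal sketch of the dogpile.cache pattern behind the cached methods.
    from dogpile.cache import make_region

    region = make_region().configure(
        'dogpile.cache.memory',  # example backend; keystone makes this configurable
        expiration_time=600,
    )

    class TokenManager(object):
        @region.cache_on_arguments()
        def _lookup_token(self, token_id):
            # In the real manager this is a call into the token driver.
            print('cache miss for %s' % token_id)
            return {'id': token_id, 'expires': None}

    manager = TokenManager()
    manager._lookup_token('abc123')  # runs the method and caches the result
    manager._lookup_token('abc123')  # served from the cache

The decorator derives the cache key from the method arguments (minus
self), which is why the token_id normalization described further down
matters for consistent lookups and invalidation.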

Calls to token_api.delete_token and token_api.delete_tokens properly
invalidate the cache entries for the tokens being acted upon, as well
as the cached revoked_tokens_list and the cached validate/check token
results.
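
A hedged sketch of that invalidation, using the same illustrative
region/manager layout as above (the real calls appear in the token
manager diff below):

    # Sketch of cache invalidation on delete; names are stand-ins as above.
    from dogpile.cache import make_region

    region = make_region().configure('dogpile.cache.memory', expiration_time=600)

    class TokenManager(object):
        @region.cache_on_arguments()
        def _lookup_token(self, token_id):
            return {'id': token_id}

        @region.cache_on_arguments()
        def list_revoked_tokens(self):
            return []

        def delete_token(self, token_id):
            # ... the driver delete happens here ...
            # invalidate() takes the same arguments as a normal call,
            # including self, so the generated cache key matches.
            self._lookup_token.invalidate(self, token_id)
            # refresh() re-runs the method and re-primes the cache.
            self.list_revoked_tokens.refresh(self)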

Token caching is configurable independently of the revocation_list
caching.
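
For example (the values are illustrative only; the option names match
the [token] options added in this patchset and shown in the sample
config below):

    [token]
    # Per-subsystem toggle; the global cache configuration still applies.
    caching = True
    # TTL for cached tokens and token validation results, in seconds.
    cache_time = 300
    # TTL for the cached revocation list, in seconds.
    revocation_cache_time = 3600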

Expiration checks are lifted from the token drivers into the token
manager. This ensures that expired tokens served from the cache still
raise TokenNotFound.
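
The manager-level check boils down to comparing the token's expiry
against the current UTC time, roughly as below. This mirrors the
_assert_valid helper in the token manager diff further down; plain
datetime stands in for keystone's timeutils, and TokenNotFound is a
local stand-in here.

    import datetime

    class TokenNotFound(Exception):
        pass

    def assert_valid(token_id, token_ref):
        """Raise TokenNotFound if the (possibly cached) token has expired."""
        expires = token_ref.get('expires')
        # The real code also normalizes timezone info before comparing.
        if not expires or datetime.datetime.utcnow() > expires:
            raise TokenNotFound(token_id)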

For cache consistency, all token IDs are transformed into the short
token hash at the provider and token driver level. Some methods have
access to the full ID (PKI tokens) and some do not; without this
normalization, cache invalidation would be inconsistent.
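
The normalization itself is done by cms.cms_hash_token (visible in the
token manager diff below). The stand-in below only illustrates the
idea; the length threshold and md5 choice are assumptions of this
sketch, not keystone's exact rules.

    import hashlib

    def unique_id(token_id):
        # Reduce long PKI token bodies to a short, stable hash so every
        # code path keys (and invalidates) the cache the same way.
        if len(token_id) > 64:
            return hashlib.md5(token_id.encode('utf-8')).hexdigest()
        return token_id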

DocImpact

partial-blueprint: caching-layer-for-driver-calls
Change-Id: Ifada0db0043fede504a091670ccc521196c2f19c
Morgan Fainberg 2013-08-24 19:16:30 -07:00
parent 6dce45b1d2
commit 53a83c2073
8 changed files with 121 additions and 31 deletions


@@ -255,10 +255,15 @@ behavior is that subsystem caching is enabled, but the global toggle is set to d
Current keystone systems that have caching capabilities:
* ``token``
The token system has a separate ``cache_time`` configuration option, that
can be set to a value above or below the global ``expiration_time`` default,
allowing for different caching behavior from the other systems in ``Keystone``.
This option is set in the ``[token]`` section of the configuration file.
The Token Revocation List cache time is handled by the configuration option
``revocation_cache_time`` in the ``[token]`` section. The revocation
list is refreshed whenever a token is revoked, and sees significantly more
requests than specific tokens or token validation of specific tokens will see.
list is refreshed whenever a token is revoked. It typically sees significantly
more requests than specific token retrievals or token validation calls.
For more information about the different backends (and configuration options):
* `dogpile.cache.backends.memory`_


@@ -164,6 +164,9 @@
# option is set to True
# caching = True
# Token specific cache time-to-live (TTL) in seconds.
# cache_time =
# Revocation-List specific cache time-to-live (TTL) in seconds.
# revocation_cache_time = 3600


@@ -43,18 +43,18 @@ class DebugProxy(proxy.ProxyBackend):
"""Extra Logging ProxyBackend."""
def get(self, key):
value = self.proxied.get(key)
msg = _('CACHE_GET: Key: "%(key)s)" "%(value)s"')
msg = _('CACHE_GET: Key: "%(key)s" Value: "%(value)s"')
LOG.debug(msg % {'key': key, 'value': value})
return value
def get_multi(self, keys):
values = self.proxied.get_multi(keys)
msg = _('CACHE_GET_MULTI: "%(key)s" "%(value)s"')
msg = _('CACHE_GET_MULTI: "%(keys)s" Values: "%(values)s"')
LOG.debug(msg % {'keys': keys, 'values': values})
return values
def set(self, key, value):
msg = _('CACHE_SET: Key: %(key)s Value: %(value)s')
msg = _('CACHE_SET: Key: "%(key)s" Value: "%(value)s"')
LOG.debug(msg % {'key': key, 'value': value})
return self.proxied.set(key, value)


@@ -72,7 +72,8 @@ FILE_OPTIONS = {
cfg.StrOpt('driver',
default='keystone.token.backends.sql.Token'),
cfg.BoolOpt('caching', default=True),
cfg.IntOpt('revocation_cache_time', default=3600)],
cfg.IntOpt('revocation_cache_time', default=3600),
cfg.IntOpt('cache_time', default=None)],
'cache': [
cfg.StrOpt('config_prefix', default='cache.keystone'),
cfg.IntOpt('expiration_time', default=600),


@@ -42,15 +42,9 @@ class Token(kvs.Base, token.Driver):
def get_token(self, token_id):
try:
ref = self.db.get('token-%s' % token_id)
except exception.NotFound:
raise exception.TokenNotFound(token_id=token_id)
now = timeutils.utcnow()
expiry = ref['expires']
if expiry is None:
raise exception.TokenNotFound(token_id=token_id)
if expiry > now:
return copy.deepcopy(ref)
else:
except Exception:
# On any issues here, Token is not found.
raise exception.TokenNotFound(token_id=token_id)
def create_token(self, token_id, data):


@@ -15,7 +15,6 @@
# under the License.
import copy
import datetime
from keystone.common import sql
from keystone import exception
@@ -45,13 +44,8 @@ class Token(sql.Base, token.Driver):
raise exception.TokenNotFound(token_id=token_id)
session = self.get_session()
token_ref = session.query(TokenModel).get(token_id)
now = datetime.datetime.utcnow()
if not token_ref or not token_ref.valid:
raise exception.TokenNotFound(token_id=token_id)
if not token_ref.expires:
raise exception.TokenNotFound(token_id=token_id)
if now >= token_ref.expires:
raise exception.TokenNotFound(token_id=token_id)
return token_ref.to_dict()
def create_token(self, token_id, data):


@@ -91,6 +91,7 @@ def validate_auth_info(self, user_ref, tenant_ref):
raise exception.Unauthorized(msg)
@dependency.requires('token_provider_api')
@dependency.provider('token_api')
class Manager(manager.Manager):
"""Default pivot point for the Token backend.
@@ -103,7 +104,7 @@ class Manager(manager.Manager):
def __init__(self):
super(Manager, self).__init__(CONF.token.driver)
def _unique_id(self, token_id):
def unique_id(self, token_id):
"""Return a unique ID for a token.
The returned value is useful as the primary key of a database table,
@@ -115,21 +116,55 @@ class Manager(manager.Manager):
"""
return cms.cms_hash_token(token_id)
def _assert_valid(self, token_id, token_ref):
"""Raise TokenNotFound if the token is expired."""
current_time = timeutils.normalize_time(timeutils.utcnow())
expires = token_ref.get('expires')
if not expires or current_time > timeutils.normalize_time(expires):
raise exception.TokenNotFound(token_id=token_id)
def get_token(self, token_id):
return self.driver.get_token(self._unique_id(token_id))
unique_id = self.unique_id(token_id)
token_ref = self._get_token(unique_id)
# NOTE(morganfainberg): Lift expired checking to the manager, there is
# no reason to make the drivers implement this check. With caching,
# self._get_token could return an expired token. Make sure we behave
# as expected and raise TokenNotFound on those instances.
self._assert_valid(token_id, token_ref)
return token_ref
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=CONF.token.cache_time)
def _get_token(self, token_id):
# Only ever use the "unique" id in the cache key.
return self.driver.get_token(token_id)
def create_token(self, token_id, data):
unique_id = self.unique_id(token_id)
data_copy = copy.deepcopy(data)
data_copy['id'] = self._unique_id(token_id)
return self.driver.create_token(self._unique_id(token_id), data_copy)
data_copy['id'] = unique_id
ret = self.driver.create_token(unique_id, data_copy)
if SHOULD_CACHE(ret):
# NOTE(morganfainberg): when doing a cache set, you must pass the
# same arguments through, the same as invalidate (this includes
# "self"). First argument is always the value to be cached
self._get_token.set(ret, self, unique_id)
return ret
def delete_token(self, token_id):
self.driver.delete_token(self._unique_id(token_id))
unique_id = self.unique_id(token_id)
self.driver.delete_token(unique_id)
self._invalidate_individual_token_cache(unique_id)
self.invalidate_revocation_list()
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
token_list = self.driver.list_tokens(user_id, tenant_id, trust_id,
consumer_id)
self.driver.delete_tokens(user_id, tenant_id, trust_id, consumer_id)
for token_id in token_list:
unique_id = self.unique_id(token_id)
self._invalidate_individual_token_cache(unique_id, tenant_id)
self.invalidate_revocation_list()
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
@@ -146,6 +181,19 @@ class Manager(manager.Manager):
# determining cache-keys.
self.list_revoked_tokens.refresh(self)
def _invalidate_individual_token_cache(self, token_id, belongs_to=None):
# NOTE(morganfainberg): invalidate takes the exact same arguments as
# the normal method, this means we need to pass "self" in (which gets
# stripped off).
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self._get_token.invalidate(self, token_id)
self.token_provider_api.invalidate_individual_token_cache(token_id,
belongs_to)
class Driver(object):
"""Interface description for a Token driver."""


@@ -17,6 +17,7 @@
"""Token provider interface."""
from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
from keystone import config
@@ -27,6 +28,7 @@ from keystone.openstack.common import timeutils
CONF = config.CONF
LOG = logging.getLogger(__name__)
SHOULD_CACHE = cache.should_cache_fn('token')
# supported token versions
@@ -101,17 +103,26 @@ class Manager(manager.Manager):
super(Manager, self).__init__(self.get_token_provider())
def validate_token(self, token_id, belongs_to=None):
token = self.driver.validate_token(token_id, belongs_to)
unique_id = self.token_api.unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_token(unique_id, belongs_to)
self._is_valid_token(token)
return token
def validate_v2_token(self, token_id, belongs_to=None):
token = self.driver.validate_v2_token(token_id, belongs_to)
unique_id = self.token_api.unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_v2_token(unique_id, belongs_to)
self._is_valid_token(token)
return token
def validate_v3_token(self, token_id):
token = self.driver.validate_v3_token(token_id)
unique_id = self.token_api.unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_v3_token(unique_id)
self._is_valid_token(token)
return token
@@ -123,7 +134,10 @@ class Manager(manager.Manager):
:returns: None
:raises: keystone.exception.Unauthorized
"""
self.validate_v2_token(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
unique_id = self.token_api.unique_id(token_id)
self.validate_v2_token(unique_id, belongs_to=belongs_to)
def check_v3_token(self, token_id):
"""Check the validity of the given V3 token.
@@ -132,11 +146,28 @@ class Manager(manager.Manager):
:returns: None
:raises: keystone.exception.Unauthorized
"""
self.validate_v3_token(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
unique_id = self.token_api.unique_id(token_id)
self.validate_v3_token(unique_id)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=CONF.token.cache_time)
def _validate_token(self, token_id, belongs_to=None):
return self.driver.validate_token(token_id, belongs_to)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=CONF.token.cache_time)
def _validate_v2_token(self, token_id, belongs_to=None):
return self.driver.validate_v2_token(token_id, belongs_to)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=CONF.token.cache_time)
def _validate_v3_token(self, token_id):
return self.driver.validate_v3_token(token_id)
def _is_valid_token(self, token):
# Verify the token has not expired.
current_time = timeutils.normalize_time(timeutils.utcnow())
try:
@@ -159,6 +190,20 @@ class Manager(manager.Manager):
# Token is expired, we have a malformed token, or something went wrong.
raise exception.Unauthorized(_('Failed to validate token'))
def invalidate_individual_token_cache(self, token_id, belongs_to=None):
# NOTE(morganfainberg): invalidate takes the exact same arguments as
# the normal method, this means we need to pass "self" in (which gets
# stripped off).
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self._validate_v3_token.invalidate(self, token_id)
self._validate_v2_token.invalidate(self, token_id)
self._validate_v2_token.invalidate(self, token_id, belongs_to)
self._validate_token.invalidate(self, token_id, belongs_to)
class Provider(object):
"""Interface description for a Token provider."""