diff --git a/keystone/backends.py b/keystone/backends.py index 91709618da..42e47945a6 100644 --- a/keystone/backends.py +++ b/keystone/backends.py @@ -41,7 +41,7 @@ def load_backends(): id_mapping_api=identity.MappingManager(), identity_api=_IDENTITY_API, policy_api=policy.Manager(), - token_api=token.Manager(), + token_api=token.persistence.Manager(), trust_api=trust.Manager(), token_provider_api=token.provider.Manager()) diff --git a/keystone/cli.py b/keystone/cli.py index 8a22d5f724..d8fc8ce6ca 100644 --- a/keystone/cli.py +++ b/keystone/cli.py @@ -172,7 +172,7 @@ class TokenFlush(BaseApp): @classmethod def main(cls): - token_manager = token.Manager() + token_manager = token.persistence.Manager() token_manager.driver.flush_expired_tokens() diff --git a/keystone/common/config.py b/keystone/common/config.py index 4363896047..91c1e9475c 100644 --- a/keystone/common/config.py +++ b/keystone/common/config.py @@ -238,7 +238,7 @@ FILE_OPTIONS = { '"keystone.token.providers.[pkiz|pki|uuid].' 'Provider". The default provider is pkiz.'), cfg.StrOpt('driver', - default='keystone.token.backends.sql.Token', + default='keystone.token.persistence.backends.sql.Token', help='Token persistence backend driver.'), cfg.BoolOpt('caching', default=True, help='Toggle for token system cacheing. 
This has no ' diff --git a/keystone/tests/core.py b/keystone/tests/core.py index 0a727991e3..5e41138bda 100644 --- a/keystone/tests/core.py +++ b/keystone/tests/core.py @@ -367,7 +367,7 @@ class TestCase(BaseTestCase): ca_certs='examples/pki/certs/cacert.pem') self.config_fixture.config( group='token', - driver='keystone.token.backends.kvs.Token') + driver='keystone.token.persistence.backends.kvs.Token') self.config_fixture.config( group='trust', driver='keystone.trust.backends.kvs.Trust') @@ -759,7 +759,7 @@ class SQLDriverOverrides(object): driver='keystone.contrib.revoke.backends.sql.Revoke') self.config_fixture.config( group='token', - driver='keystone.token.backends.sql.Token') + driver='keystone.token.persistence.backends.sql.Token') self.config_fixture.config( group='trust', driver='keystone.trust.backends.sql.Trust') diff --git a/keystone/tests/test_backend_sql.py b/keystone/tests/test_backend_sql.py index b6a3c0c469..21f9cc2893 100644 --- a/keystone/tests/test_backend_sql.py +++ b/keystone/tests/test_backend_sql.py @@ -29,7 +29,7 @@ from keystone import tests from keystone.tests import default_fixtures from keystone.tests.ksfixtures import database from keystone.tests import test_backend -from keystone.token.backends import sql as token_sql +from keystone.token.persistence.backends import sql as token_sql CONF = config.CONF diff --git a/keystone/tests/unit/token/__init__.py b/keystone/tests/unit/token/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/keystone/tests/unit/token/test_token_persistence_proxy.py b/keystone/tests/unit/token/test_token_persistence_proxy.py new file mode 100644 index 0000000000..b0d9e20052 --- /dev/null +++ b/keystone/tests/unit/token/test_token_persistence_proxy.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystone.common.kvs import core as kvs_core +from keystone import tests +from keystone import token +from keystone.token.backends import kvs as proxy_kvs +from keystone.token.backends import memcache as proxy_memcache +from keystone.token.backends import sql as proxy_sql +from keystone.token.persistence.backends import kvs +from keystone.token.persistence.backends import memcache +from keystone.token.persistence.backends import sql + + +class TokenPersistenceProxyTest(tests.BaseTestCase): + def test_symbols(self): + """Verify token persistence proxy symbols. + + The Token manager has been moved from `keystone.token.core` to + `keystone.token.persistence`. This test verifies that the symbols + resolve as expected. + + """ + self.assertTrue(issubclass(token.Manager, token.persistence.Manager)) + self.assertTrue(issubclass(token.Driver, token.persistence.Driver)) + + +class TokenPersistenceBackendSymbols(tests.TestCase): + def test_symbols(self): + """Verify the token persistence backend proxy symbols. + + Make sure that the modules that are (for compat reasons) located at + `keystone.token.backends` are the same as the new location + `keystone.token.persistence.backends`. 
+ """ + self.assertTrue(issubclass(proxy_kvs.Token, kvs.Token)) + self.assertTrue(issubclass(proxy_memcache.Token, memcache.Token)) + self.assertTrue(issubclass(proxy_sql.Token, sql.Token)) + self.assertIs(proxy_sql.TokenModel, sql.TokenModel) + + def test_instantiation_kvs(self): + self.config_fixture.config( + group='token', + driver='keystone.token.backends.kvs.Token') + + # Clear the KVS registry so we can re-instantiate the KVS backend. This + # is required because the KVS core tries to limit duplication of + # CacheRegion objects and CacheRegion objects cannot be reconfigured. + kvs_core.KEY_VALUE_STORE_REGISTRY.clear() + + manager = token.persistence.Manager() + self.assertIsInstance(manager.driver, proxy_kvs.Token) + self.assertIsInstance(manager.driver, kvs.Token) + + def test_instantiation_memcache(self): + self.config_fixture.config( + group='token', + driver='keystone.token.backends.memcache.Token') + + # The memcache token backend is just a light wrapper around the KVS + # token backend. Clear the KVS registry so we can re-instantiate the + # KVS backend. This is required because the KVS core tries to limit + # duplication of CacheRegion objects and CacheRegion objects cannot be + # reconfigured. 
+ kvs_core.KEY_VALUE_STORE_REGISTRY.clear() + + manager = token.persistence.Manager() + self.assertIsInstance(manager.driver, proxy_memcache.Token) + self.assertIsInstance(manager.driver, memcache.Token) + + def test_instantiation_sql(self): + self.config_fixture.config( + group='token', + driver='keystone.token.backends.sql.Token') + manager = token.persistence.Manager() + self.assertIsInstance(manager.driver, proxy_sql.Token) + self.assertIsInstance(manager.driver, sql.Token) diff --git a/keystone/token/__init__.py b/keystone/token/__init__.py index d1703f274e..9a69e0949a 100644 --- a/keystone/token/__init__.py +++ b/keystone/token/__init__.py @@ -14,5 +14,6 @@ from keystone.token import controllers # noqa from keystone.token.core import * # noqa +from keystone.token import persistence # noqa from keystone.token import provider # noqa from keystone.token import routers # noqa diff --git a/keystone/token/backends/__init__.py b/keystone/token/backends/__init__.py index e69de29bb2..6065887655 100644 --- a/keystone/token/backends/__init__.py +++ b/keystone/token/backends/__init__.py @@ -0,0 +1,15 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(morganfainberg): This module is for transition from the old token +# backend package location to the new one. This module is slated for removal +# in the Kilo development cycle. 
diff --git a/keystone/token/backends/kvs.py b/keystone/token/backends/kvs.py index b60c4c385b..5407a154dc 100644 --- a/keystone/token/backends/kvs.py +++ b/keystone/token/backends/kvs.py @@ -1,6 +1,3 @@ -# Copyright 2013 Metacloud, Inc. -# Copyright 2012 OpenStack Foundation -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,342 +10,15 @@ # License for the specific language governing permissions and limitations # under the License. -from __future__ import absolute_import -import copy - -import six - -from keystone.common import kvs -from keystone import config -from keystone import exception -from keystone.i18n import _ -from keystone.openstack.common import log -from keystone.openstack.common import timeutils -from keystone import token -from keystone.token import provider +from keystone.openstack.common import versionutils +from keystone.token.persistence.backends import kvs -CONF = config.CONF -LOG = log.getLogger(__name__) - - -class Token(token.Driver): - """KeyValueStore backend for tokens. - - This is the base implementation for any/all key-value-stores (e.g. - memcached) for the Token backend. It is recommended to only use the base - in-memory implementation for testing purposes. 
- """ - - revocation_key = 'revocation-list' - kvs_backend = 'openstack.kvs.Memory' - - def __init__(self, backing_store=None, **kwargs): +class Token(kvs.Token): + @versionutils.deprecated( + versionutils.deprecated.JUNO, + in_favor_of='keystone.token.persistence.backends.kvs.Token', + remove_in=+1, + what='keystone.token.backends.kvs.Token') + def __init__(self): super(Token, self).__init__() - self._store = kvs.get_key_value_store('token-driver') - if backing_store is not None: - self.kvs_backend = backing_store - self._store.configure(backing_store=self.kvs_backend, **kwargs) - if self.__class__ == Token: - # NOTE(morganfainberg): Only warn if the base KVS implementation - # is instantiated. - LOG.warn(_('It is recommended to only use the base ' - 'key-value-store implementation for the token driver ' - 'for testing purposes. ' - 'Please use keystone.token.backends.memcache.Token ' - 'or keystone.token.backends.sql.Token instead.')) - - def _prefix_token_id(self, token_id): - return 'token-%s' % token_id.encode('utf-8') - - def _prefix_user_id(self, user_id): - return 'usertokens-%s' % user_id.encode('utf-8') - - def _get_key_or_default(self, key, default=None): - try: - return self._store.get(key) - except exception.NotFound: - return default - - def _get_key(self, key): - return self._store.get(key) - - def _set_key(self, key, value, lock=None): - self._store.set(key, value, lock) - - def _delete_key(self, key): - return self._store.delete(key) - - def get_token(self, token_id): - ptk = self._prefix_token_id(token_id) - try: - token_ref = self._get_key(ptk) - except exception.NotFound: - raise exception.TokenNotFound(token_id=token_id) - - return token_ref - - def create_token(self, token_id, data): - """Create a token by id and data. - - It is assumed the caller has performed data validation on the "data" - parameter. 
- """ - data_copy = copy.deepcopy(data) - ptk = self._prefix_token_id(token_id) - if not data_copy.get('expires'): - data_copy['expires'] = provider.default_expire_time() - if not data_copy.get('user_id'): - data_copy['user_id'] = data_copy['user']['id'] - - # NOTE(morganfainberg): for ease of manipulating the data without - # concern about the backend, always store the value(s) in the - # index as the isotime (string) version so this is where the string is - # built. - expires_str = timeutils.isotime(data_copy['expires'], subsecond=True) - - self._set_key(ptk, data_copy) - user_id = data['user']['id'] - user_key = self._prefix_user_id(user_id) - self._update_user_token_list(user_key, token_id, expires_str) - if CONF.trust.enabled and data.get('trust_id'): - # NOTE(morganfainberg): If trusts are enabled and this is a trust - # scoped token, we add the token to the trustee list as well. This - # allows password changes of the trustee to also expire the token. - # There is no harm in placing the token in multiple lists, as - # _list_tokens is smart enough to handle almost any case of - # valid/invalid/expired for a given token. - token_data = data_copy['token_data'] - if data_copy['token_version'] == token.provider.V2: - trustee_user_id = token_data['access']['trust'][ - 'trustee_user_id'] - elif data_copy['token_version'] == token.provider.V3: - trustee_user_id = token_data['OS-TRUST:trust'][ - 'trustee_user_id'] - else: - raise token.provider.UnsupportedTokenVersionException( - _('Unknown token version %s') % - data_copy.get('token_version')) - - trustee_key = self._prefix_user_id(trustee_user_id) - self._update_user_token_list(trustee_key, token_id, expires_str) - - return data_copy - - def _get_user_token_list_with_expiry(self, user_key): - """Return a list of tuples in the format (token_id, token_expiry) for - the user_key. 
- """ - return self._get_key_or_default(user_key, default=[]) - - def _get_user_token_list(self, user_key): - """Return a list of token_ids for the user_key.""" - token_list = self._get_user_token_list_with_expiry(user_key) - # Each element is a tuple of (token_id, token_expiry). Most code does - # not care about the expiry, it is stripped out and only a - # list of token_ids are returned. - return [t[0] for t in token_list] - - def _update_user_token_list(self, user_key, token_id, expires_isotime_str): - current_time = self._get_current_time() - revoked_token_list = set([t['id'] for t in - self.list_revoked_tokens()]) - - with self._store.get_lock(user_key) as lock: - filtered_list = [] - token_list = self._get_user_token_list_with_expiry(user_key) - for item in token_list: - try: - item_id, expires = self._format_token_index_item(item) - except (ValueError, TypeError): - # NOTE(morganfainberg): Skip on expected errors - # possibilities from the `_format_token_index_item` method. - continue - - if expires < current_time: - LOG.debug(('Token `%(token_id)s` is expired, removing ' - 'from `%(user_key)s`.'), - {'token_id': item_id, 'user_key': user_key}) - continue - - if item_id in revoked_token_list: - # NOTE(morganfainberg): If the token has been revoked, it - # can safely be removed from this list. This helps to keep - # the user_token_list as reasonably small as possible. 
- LOG.debug(('Token `%(token_id)s` is revoked, removing ' - 'from `%(user_key)s`.'), - {'token_id': item_id, 'user_key': user_key}) - continue - filtered_list.append(item) - filtered_list.append((token_id, expires_isotime_str)) - self._set_key(user_key, filtered_list, lock) - return filtered_list - - def _get_current_time(self): - return timeutils.normalize_time(timeutils.utcnow()) - - def _add_to_revocation_list(self, data, lock): - filtered_list = [] - revoked_token_data = {} - - current_time = self._get_current_time() - expires = data['expires'] - - if isinstance(expires, six.string_types): - expires = timeutils.parse_isotime(expires) - - expires = timeutils.normalize_time(expires) - - if expires < current_time: - LOG.warning(_('Token `%s` is expired, not adding to the ' - 'revocation list.'), data['id']) - return - - revoked_token_data['expires'] = timeutils.isotime(expires, - subsecond=True) - revoked_token_data['id'] = data['id'] - - token_list = self._get_key_or_default(self.revocation_key, default=[]) - if not isinstance(token_list, list): - # NOTE(morganfainberg): In the case that the revocation list is not - # in a format we understand, reinitialize it. This is an attempt to - # not allow the revocation list to be completely broken if - # somehow the key is changed outside of keystone (e.g. memcache - # that is shared by multiple applications). Logging occurs at error - # level so that the cloud administrators have some awareness that - # the revocation_list needed to be cleared out. In all, this should - # be recoverable. Keystone cannot control external applications - # from changing a key in some backends, however, it is possible to - # gracefully handle and notify of this event. - LOG.error(_('Reinitializing revocation list due to error ' - 'in loading revocation list from backend. ' - 'Expected `list` type got `%(type)s`. 
Old ' - 'revocation list data: %(list)r'), - {'type': type(token_list), 'list': token_list}) - token_list = [] - - # NOTE(morganfainberg): on revocation, cleanup the expired entries, try - # to keep the list of tokens revoked at the minimum. - for token_data in token_list: - try: - expires_at = timeutils.normalize_time( - timeutils.parse_isotime(token_data['expires'])) - except ValueError: - LOG.warning(_('Removing `%s` from revocation list due to ' - 'invalid expires data in revocation list.'), - token_data.get('id', 'INVALID_TOKEN_DATA')) - continue - if expires_at > current_time: - filtered_list.append(token_data) - filtered_list.append(revoked_token_data) - self._set_key(self.revocation_key, filtered_list, lock) - - def delete_token(self, token_id): - # Test for existence - with self._store.get_lock(self.revocation_key) as lock: - data = self.get_token(token_id) - ptk = self._prefix_token_id(token_id) - result = self._delete_key(ptk) - self._add_to_revocation_list(data, lock) - return result - - def delete_tokens(self, user_id, tenant_id=None, trust_id=None, - consumer_id=None): - return super(Token, self).delete_tokens( - user_id=user_id, - tenant_id=tenant_id, - trust_id=trust_id, - consumer_id=consumer_id, - ) - - def _format_token_index_item(self, item): - try: - token_id, expires = item - except (TypeError, ValueError): - LOG.debug(('Invalid token entry expected tuple of ' - '`(, )` got: `%(item)r`'), - dict(item=item)) - raise - - try: - expires = timeutils.normalize_time( - timeutils.parse_isotime(expires)) - except ValueError: - LOG.debug(('Invalid expires time on token `%(token_id)s`:' - ' %(expires)r'), - dict(token_id=token_id, expires=expires)) - raise - return token_id, expires - - def _token_match_tenant(self, token_ref, tenant_id): - if token_ref.get('tenant'): - return token_ref['tenant'].get('id') == tenant_id - return False - - def _token_match_trust(self, token_ref, trust_id): - if not token_ref.get('trust_id'): - return False - return 
token_ref['trust_id'] == trust_id - - def _token_match_consumer(self, token_ref, consumer_id): - try: - oauth = token_ref['token_data']['token']['OS-OAUTH1'] - return oauth.get('consumer_id') == consumer_id - except KeyError: - return False - - def _list_tokens(self, user_id, tenant_id=None, trust_id=None, - consumer_id=None): - # This function is used to generate the list of tokens that should be - # revoked when revoking by token identifiers. This approach will be - # deprecated soon, probably in the Juno release. Setting revoke_by_id - # to False indicates that this kind of recording should not be - # performed. In order to test the revocation events, tokens shouldn't - # be deleted from the backends. This check ensures that tokens are - # still recorded. - if not CONF.token.revoke_by_id: - return [] - tokens = [] - user_key = self._prefix_user_id(user_id) - token_list = self._get_user_token_list_with_expiry(user_key) - current_time = self._get_current_time() - for item in token_list: - try: - token_id, expires = self._format_token_index_item(item) - except (TypeError, ValueError): - # NOTE(morganfainberg): Skip on expected error possibilities - # from the `_format_token_index_item` method. - continue - - if expires < current_time: - continue - - try: - token_ref = self.get_token(token_id) - except exception.TokenNotFound: - # NOTE(morganfainberg): Token doesn't exist, skip it. 
- continue - if token_ref: - if tenant_id is not None: - if not self._token_match_tenant(token_ref, tenant_id): - continue - if trust_id is not None: - if not self._token_match_trust(token_ref, trust_id): - continue - if consumer_id is not None: - if not self._token_match_consumer(token_ref, consumer_id): - continue - - tokens.append(token_id) - return tokens - - def list_revoked_tokens(self): - revoked_token_list = self._get_key_or_default(self.revocation_key, - default=[]) - if isinstance(revoked_token_list, list): - return revoked_token_list - return [] - - def flush_expired_tokens(self): - """Archive or delete tokens that have expired.""" - raise exception.NotImplemented() diff --git a/keystone/token/backends/memcache.py b/keystone/token/backends/memcache.py index 28e4fafb3e..bd5df74dea 100644 --- a/keystone/token/backends/memcache.py +++ b/keystone/token/backends/memcache.py @@ -1,6 +1,3 @@ -# Copyright 2013 Metacloud, Inc. -# Copyright 2012 OpenStack Foundation -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,18 +10,15 @@ # License for the specific language governing permissions and limitations # under the License. 
-from keystone.common import config -from keystone.token.backends import kvs +from keystone.openstack.common import versionutils +from keystone.token.persistence.backends import memcache -CONF = config.CONF - - -class Token(kvs.Token): - kvs_backend = 'openstack.kvs.Memcached' - - def __init__(self, *args, **kwargs): - kwargs['no_expiry_keys'] = [self.revocation_key] - kwargs['memcached_expire_time'] = CONF.token.expiration - kwargs['url'] = CONF.memcache.servers - super(Token, self).__init__(*args, **kwargs) +class Token(memcache.Token): + @versionutils.deprecated( + versionutils.deprecated.JUNO, + in_favor_of='keystone.token.persistence.backends.memcache.Token', + remove_in=+1, + what='keystone.token.backends.memcache.Token') + def __init__(self): + super(Token, self).__init__() diff --git a/keystone/token/backends/sql.py b/keystone/token/backends/sql.py index b87f10e310..93aaa43e0a 100644 --- a/keystone/token/backends/sql.py +++ b/keystone/token/backends/sql.py @@ -1,5 +1,3 @@ -# Copyright 2012 OpenStack Foundation -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -12,221 +10,18 @@ # License for the specific language governing permissions and limitations # under the License. 
-import copy - -from keystone.common import sql -from keystone import config -from keystone import exception -from keystone.openstack.common import timeutils -from keystone import token -from keystone.token import provider +from keystone.openstack.common import versionutils +from keystone.token.persistence.backends import sql -CONF = config.CONF +class Token(sql.Token): + @versionutils.deprecated( + versionutils.deprecated.JUNO, + in_favor_of='keystone.token.persistence.backends.sql.Token', + remove_in=+1, + what='keystone.token.backends.sql.Token') + def __init__(self): + super(Token, self).__init__() -class TokenModel(sql.ModelBase, sql.DictBase): - __tablename__ = 'token' - attributes = ['id', 'expires', 'user_id', 'trust_id'] - id = sql.Column(sql.String(64), primary_key=True) - expires = sql.Column(sql.DateTime(), default=None) - extra = sql.Column(sql.JsonBlob()) - valid = sql.Column(sql.Boolean(), default=True, nullable=False) - user_id = sql.Column(sql.String(64)) - trust_id = sql.Column(sql.String(64)) - __table_args__ = ( - sql.Index('ix_token_expires', 'expires'), - sql.Index('ix_token_expires_valid', 'expires', 'valid') - ) - - -class Token(token.Driver): - # Public interface - def get_token(self, token_id): - if token_id is None: - raise exception.TokenNotFound(token_id=token_id) - session = sql.get_session() - token_ref = session.query(TokenModel).get(token_id) - if not token_ref or not token_ref.valid: - raise exception.TokenNotFound(token_id=token_id) - return token_ref.to_dict() - - def create_token(self, token_id, data): - data_copy = copy.deepcopy(data) - if not data_copy.get('expires'): - data_copy['expires'] = provider.default_expire_time() - if not data_copy.get('user_id'): - data_copy['user_id'] = data_copy['user']['id'] - - token_ref = TokenModel.from_dict(data_copy) - token_ref.valid = True - session = sql.get_session() - with session.begin(): - session.add(token_ref) - return token_ref.to_dict() - - def delete_token(self, token_id): - 
session = sql.get_session() - with session.begin(): - token_ref = session.query(TokenModel).get(token_id) - if not token_ref or not token_ref.valid: - raise exception.TokenNotFound(token_id=token_id) - token_ref.valid = False - - def delete_tokens(self, user_id, tenant_id=None, trust_id=None, - consumer_id=None): - """Deletes all tokens in one session - - The user_id will be ignored if the trust_id is specified. user_id - will always be specified. - If using a trust, the token's user_id is set to the trustee's user ID - or the trustor's user ID, so will use trust_id to query the tokens. - - """ - session = sql.get_session() - with session.begin(): - now = timeutils.utcnow() - query = session.query(TokenModel) - query = query.filter_by(valid=True) - query = query.filter(TokenModel.expires > now) - if trust_id: - query = query.filter(TokenModel.trust_id == trust_id) - else: - query = query.filter(TokenModel.user_id == user_id) - - for token_ref in query.all(): - if tenant_id: - token_ref_dict = token_ref.to_dict() - if not self._tenant_matches(tenant_id, token_ref_dict): - continue - if consumer_id: - token_ref_dict = token_ref.to_dict() - if not self._consumer_matches(consumer_id, token_ref_dict): - continue - - token_ref.valid = False - - def _tenant_matches(self, tenant_id, token_ref_dict): - return ((tenant_id is None) or - (token_ref_dict.get('tenant') and - token_ref_dict['tenant'].get('id') == tenant_id)) - - def _consumer_matches(self, consumer_id, ref): - if consumer_id is None: - return True - else: - try: - oauth = ref['token_data']['token'].get('OS-OAUTH1', {}) - return oauth and oauth['consumer_id'] == consumer_id - except KeyError: - return False - - def _list_tokens_for_trust(self, trust_id): - session = sql.get_session() - tokens = [] - now = timeutils.utcnow() - query = session.query(TokenModel) - query = query.filter(TokenModel.expires > now) - query = query.filter(TokenModel.trust_id == trust_id) - - token_references = query.filter_by(valid=True) - 
for token_ref in token_references: - token_ref_dict = token_ref.to_dict() - tokens.append(token_ref_dict['id']) - return tokens - - def _list_tokens_for_user(self, user_id, tenant_id=None): - session = sql.get_session() - tokens = [] - now = timeutils.utcnow() - query = session.query(TokenModel) - query = query.filter(TokenModel.expires > now) - query = query.filter(TokenModel.user_id == user_id) - - token_references = query.filter_by(valid=True) - for token_ref in token_references: - token_ref_dict = token_ref.to_dict() - if self._tenant_matches(tenant_id, token_ref_dict): - tokens.append(token_ref['id']) - return tokens - - def _list_tokens_for_consumer(self, user_id, consumer_id): - tokens = [] - session = sql.get_session() - with session.begin(): - now = timeutils.utcnow() - query = session.query(TokenModel) - query = query.filter(TokenModel.expires > now) - query = query.filter(TokenModel.user_id == user_id) - token_references = query.filter_by(valid=True) - - for token_ref in token_references: - token_ref_dict = token_ref.to_dict() - if self._consumer_matches(consumer_id, token_ref_dict): - tokens.append(token_ref_dict['id']) - return tokens - - def _list_tokens(self, user_id, tenant_id=None, trust_id=None, - consumer_id=None): - if not CONF.token.revoke_by_id: - return [] - if trust_id: - return self._list_tokens_for_trust(trust_id) - if consumer_id: - return self._list_tokens_for_consumer(user_id, consumer_id) - else: - return self._list_tokens_for_user(user_id, tenant_id) - - def list_revoked_tokens(self): - session = sql.get_session() - tokens = [] - now = timeutils.utcnow() - query = session.query(TokenModel.id, TokenModel.expires) - query = query.filter(TokenModel.expires > now) - token_references = query.filter_by(valid=False) - for token_ref in token_references: - record = { - 'id': token_ref[0], - 'expires': token_ref[1], - } - tokens.append(record) - return tokens - - def token_flush_batch_size(self, dialect): - batch_size = 0 - if dialect == 
'ibm_db_sa': - # This functionality is limited to DB2, because - # it is necessary to prevent the tranaction log - # from filling up, whereas at least some of the - # other supported databases do not support update - # queries with LIMIT subqueries nor do they appear - # to require the use of such queries when deleting - # large numbers of records at once. - batch_size = 100 - # Limit of 100 is known to not fill a transaction log - # of default maximum size while not significantly - # impacting the performance of large token purges on - # systems where the maximum transaction log size has - # been increased beyond the default. - return batch_size - - def flush_expired_tokens(self): - session = sql.get_session() - dialect = session.bind.dialect.name - batch_size = self.token_flush_batch_size(dialect) - if batch_size > 0: - query = session.query(TokenModel.id) - query = query.filter(TokenModel.expires < timeutils.utcnow()) - query = query.limit(batch_size).subquery() - delete_query = (session.query(TokenModel). 
- filter(TokenModel.id.in_(query))) - while True: - rowcount = delete_query.delete(synchronize_session=False) - if rowcount == 0: - break - else: - query = session.query(TokenModel) - query = query.filter(TokenModel.expires < timeutils.utcnow()) - query.delete(synchronize_session=False) - - session.flush() +TokenModel = sql.TokenModel diff --git a/keystone/token/core.py b/keystone/token/core.py index f25e158e1e..873195b44c 100644 --- a/keystone/token/core.py +++ b/keystone/token/core.py @@ -14,20 +14,13 @@ """Main entry point into the Token service.""" -import abc -import copy - -import six - from keystone.common import cache -from keystone.common import dependency -from keystone.common import manager from keystone import config from keystone import exception from keystone.i18n import _ from keystone.openstack.common import log -from keystone.openstack.common import timeutils from keystone.openstack.common import versionutils +from keystone.token import persistence from keystone.token import provider @@ -97,300 +90,21 @@ def validate_auth_info(self, user_ref, tenant_ref): raise exception.Unauthorized(msg) -@dependency.requires('assignment_api', 'identity_api', 'token_provider_api', - 'trust_api') -@dependency.provider('token_api') -class Manager(manager.Manager): - """Default pivot point for the Token backend. - - See :mod:`keystone.common.manager.Manager` for more details on how this - dynamically calls the backend. 
- - """ - +class Manager(persistence.Manager): + @versionutils.deprecated( + versionutils.deprecated.JUNO, + in_favor_of='keystone.token.persistence.Manager', + remove_in=+1, + what='keystone.token.core.Manager') def __init__(self): - super(Manager, self).__init__(CONF.token.driver) - - @versionutils.deprecated(as_of=versionutils.deprecated.JUNO, - in_favor_of='token_provider_api.unique_id', - remove_in=+1, - what='token_api.unique_id') - def unique_id(self, token_id): - return self.token_provider_api.unique_id(token_id) - - def _assert_valid(self, token_id, token_ref): - """Raise TokenNotFound if the token is expired.""" - current_time = timeutils.normalize_time(timeutils.utcnow()) - expires = token_ref.get('expires') - if not expires or current_time > timeutils.normalize_time(expires): - raise exception.TokenNotFound(token_id=token_id) - - def get_token(self, token_id): - if not token_id: - # NOTE(morganfainberg): There are cases when the - # context['token_id'] will in-fact be None. This also saves - # a round-trip to the backend if we don't have a token_id. - raise exception.TokenNotFound(token_id='') - unique_id = self.token_provider_api.unique_id(token_id) - token_ref = self._get_token(unique_id) - # NOTE(morganfainberg): Lift expired checking to the manager, there is - # no reason to make the drivers implement this check. With caching, - # self._get_token could return an expired token. Make sure we behave - # as expected and raise TokenNotFound on those instances. - self._assert_valid(token_id, token_ref) - return token_ref - - @cache.on_arguments(should_cache_fn=SHOULD_CACHE, - expiration_time=EXPIRATION_TIME) - def _get_token(self, token_id): - # Only ever use the "unique" id in the cache key. 
- return self.driver.get_token(token_id) - - def create_token(self, token_id, data): - unique_id = self.token_provider_api.unique_id(token_id) - data_copy = copy.deepcopy(data) - data_copy['id'] = unique_id - ret = self.driver.create_token(unique_id, data_copy) - if SHOULD_CACHE(ret): - # NOTE(morganfainberg): when doing a cache set, you must pass the - # same arguments through, the same as invalidate (this includes - # "self"). First argument is always the value to be cached - self._get_token.set(ret, self, unique_id) - return ret - - def delete_token(self, token_id): - if not CONF.token.revoke_by_id: - return - unique_id = self.token_provider_api.unique_id(token_id) - self.driver.delete_token(unique_id) - self._invalidate_individual_token_cache(unique_id) - self.invalidate_revocation_list() - - def delete_tokens(self, user_id, tenant_id=None, trust_id=None, - consumer_id=None): - if not CONF.token.revoke_by_id: - return - token_list = self.driver._list_tokens(user_id, tenant_id, trust_id, - consumer_id) - self.driver.delete_tokens(user_id, tenant_id, trust_id, consumer_id) - for token_id in token_list: - unique_id = self.token_provider_api.unique_id(token_id) - self._invalidate_individual_token_cache(unique_id) - self.invalidate_revocation_list() - - @cache.on_arguments(should_cache_fn=SHOULD_CACHE, - expiration_time=REVOCATION_CACHE_EXPIRATION_TIME) - def list_revoked_tokens(self): - return self.driver.list_revoked_tokens() - - def invalidate_revocation_list(self): - # NOTE(morganfainberg): Note that ``self`` needs to be passed to - # invalidate() because of the way the invalidation method works on - # determining cache-keys. - self.list_revoked_tokens.invalidate(self) - - def delete_tokens_for_domain(self, domain_id): - """Delete all tokens for a given domain. - - It will delete all the project-scoped tokens for the projects - that are owned by the given domain, as well as any tokens issued - to users that are owned by this domain. 
- - However, deletion of domain_scoped tokens will still need to be - implemented as stated in TODO below. - """ - if not CONF.token.revoke_by_id: - return - projects = self.assignment_api.list_projects() - for project in projects: - if project['domain_id'] == domain_id: - for user_id in self.assignment_api.list_user_ids_for_project( - project['id']): - self.delete_tokens_for_user(user_id, project['id']) - # TODO(morganfainberg): implement deletion of domain_scoped tokens. - - users = self.identity_api.list_users(domain_id) - user_ids = (user['id'] for user in users) - self.delete_tokens_for_users(user_ids) - - def delete_tokens_for_user(self, user_id, project_id=None): - """Delete all tokens for a given user or user-project combination. - - This method adds in the extra logic for handling trust-scoped token - revocations in a single call instead of needing to explicitly handle - trusts in the caller's logic. - """ - if not CONF.token.revoke_by_id: - return - self.delete_tokens(user_id, tenant_id=project_id) - for trust in self.trust_api.list_trusts_for_trustee(user_id): - # Ensure we revoke tokens associated to the trust / project - # user_id combination. - self.delete_tokens(user_id, trust_id=trust['id'], - tenant_id=project_id) - for trust in self.trust_api.list_trusts_for_trustor(user_id): - # Ensure we revoke tokens associated to the trust / project / - # user_id combination where the user_id is the trustor. - - # NOTE(morganfainberg): This revocation is a bit coarse, but it - # covers a number of cases such as disabling of the trustor user, - # deletion of the trustor user (for any number of reasons). It - # might make sense to refine this and be more surgical on the - # deletions (e.g. don't revoke tokens for the trusts when the - # trustor changes password). For now, to maintain previous - # functionality, this will continue to be a bit overzealous on - # revocations. 
- self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'], - tenant_id=project_id) - - def delete_tokens_for_users(self, user_ids, project_id=None): - """Delete all tokens for a list of user_ids. - - :param user_ids: list of user identifiers - :param project_id: optional project identifier - """ - if not CONF.token.revoke_by_id: - return - for user_id in user_ids: - self.delete_tokens_for_user(user_id, project_id=project_id) - - def _invalidate_individual_token_cache(self, token_id): - # NOTE(morganfainberg): invalidate takes the exact same arguments as - # the normal method, this means we need to pass "self" in (which gets - # stripped off). - - # FIXME(morganfainberg): Does this cache actually need to be - # invalidated? We maintain a cached revocation list, which should be - # consulted before accepting a token as valid. For now we will - # do the explicit individual token invalidation. - self._get_token.invalidate(self, token_id) - self.token_provider_api.invalidate_individual_token_cache(token_id) + super(Manager, self).__init__() -@six.add_metaclass(abc.ABCMeta) -class Driver(object): - """Interface description for a Token driver.""" - - @abc.abstractmethod - def get_token(self, token_id): - """Get a token by id. - - :param token_id: identity of the token - :type token_id: string - :returns: token_ref - :raises: keystone.exception.TokenNotFound - - """ - raise exception.NotImplemented() # pragma: no cover - - @abc.abstractmethod - def create_token(self, token_id, data): - """Create a token by id and data. - - :param token_id: identity of the token - :type token_id: string - :param data: dictionary with additional reference information - - :: - - { - expires='' - id=token_id, - user=user_ref, - tenant=tenant_ref, - metadata=metadata_ref - } - - :type data: dict - :returns: token_ref or None. - - """ - raise exception.NotImplemented() # pragma: no cover - - @abc.abstractmethod - def delete_token(self, token_id): - """Deletes a token by id. 
- - :param token_id: identity of the token - :type token_id: string - :returns: None. - :raises: keystone.exception.TokenNotFound - - """ - raise exception.NotImplemented() # pragma: no cover - - @abc.abstractmethod - def delete_tokens(self, user_id, tenant_id=None, trust_id=None, - consumer_id=None): - """Deletes tokens by user. - - If the tenant_id is not None, only delete the tokens by user id under - the specified tenant. - - If the trust_id is not None, it will be used to query tokens and the - user_id will be ignored. - - If the consumer_id is not None, only delete the tokens by consumer id - that match the specified consumer id. - - :param user_id: identity of user - :type user_id: string - :param tenant_id: identity of the tenant - :type tenant_id: string - :param trust_id: identity of the trust - :type trust_id: string - :param consumer_id: identity of the consumer - :type consumer_id: string - :returns: None. - :raises: keystone.exception.TokenNotFound - - """ - if not CONF.token.revoke_by_id: - return - token_list = self._list_tokens(user_id, - tenant_id=tenant_id, - trust_id=trust_id, - consumer_id=consumer_id) - - for token in token_list: - try: - self.delete_token(token) - except exception.NotFound: - pass - - @abc.abstractmethod - def _list_tokens(self, user_id, tenant_id=None, trust_id=None, - consumer_id=None): - """Returns a list of current token_id's for a user - - This is effectively a private method only used by the ``delete_tokens`` - method and should not be called by anything outside of the - ``token_api`` manager or the token driver itself. 
- - :param user_id: identity of the user - :type user_id: string - :param tenant_id: identity of the tenant - :type tenant_id: string - :param trust_id: identity of the trust - :type trust_id: string - :param consumer_id: identity of the consumer - :type consumer_id: string - :returns: list of token_id's - - """ - raise exception.NotImplemented() # pragma: no cover - - @abc.abstractmethod - def list_revoked_tokens(self): - """Returns a list of all revoked tokens - - :returns: list of token_id's - - """ - raise exception.NotImplemented() # pragma: no cover - - @abc.abstractmethod - def flush_expired_tokens(self): - """Archive or delete tokens that have expired. - """ - raise exception.NotImplemented() # pragma: no cover +class Driver(persistence.Driver): + @versionutils.deprecated( + versionutils.deprecated.JUNO, + in_favor_of='keystone.token.persistence.Driver', + remove_in=+1, + what='keystone.token.core.Driver') + def __init__(self): + super(Driver, self).__init__() diff --git a/keystone/token/persistence/__init__.py b/keystone/token/persistence/__init__.py new file mode 100644 index 0000000000..29ad5653f0 --- /dev/null +++ b/keystone/token/persistence/__init__.py @@ -0,0 +1,16 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from keystone.token.persistence.core import * # noqa + + +__all__ = ['Manager', 'Driver', 'backends'] diff --git a/keystone/token/persistence/backends/__init__.py b/keystone/token/persistence/backends/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/keystone/token/persistence/backends/kvs.py b/keystone/token/persistence/backends/kvs.py new file mode 100644 index 0000000000..a437b1366e --- /dev/null +++ b/keystone/token/persistence/backends/kvs.py @@ -0,0 +1,354 @@ +# Copyright 2013 Metacloud, Inc. +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import +import copy + +import six + +from keystone.common import kvs +from keystone import config +from keystone import exception +from keystone.i18n import _ +from keystone.openstack.common import log +from keystone.openstack.common import timeutils +from keystone import token +from keystone.token import provider + + +CONF = config.CONF +LOG = log.getLogger(__name__) + + +class Token(token.persistence.Driver): + """KeyValueStore backend for tokens. + + This is the base implementation for any/all key-value-stores (e.g. + memcached) for the Token backend. It is recommended to only use the base + in-memory implementation for testing purposes. 
+ """ + + revocation_key = 'revocation-list' + kvs_backend = 'openstack.kvs.Memory' + + def __init__(self, backing_store=None, **kwargs): + super(Token, self).__init__() + self._store = kvs.get_key_value_store('token-driver') + if backing_store is not None: + self.kvs_backend = backing_store + self._store.configure(backing_store=self.kvs_backend, **kwargs) + if self.__class__ == Token: + # NOTE(morganfainberg): Only warn if the base KVS implementation + # is instantiated. + LOG.warn(_('It is recommended to only use the base ' + 'key-value-store implementation for the token driver ' + 'for testing purposes. ' + 'Please use keystone.token.backends.memcache.Token ' + 'or keystone.token.backends.sql.Token instead.')) + + def _prefix_token_id(self, token_id): + return 'token-%s' % token_id.encode('utf-8') + + def _prefix_user_id(self, user_id): + return 'usertokens-%s' % user_id.encode('utf-8') + + def _get_key_or_default(self, key, default=None): + try: + return self._store.get(key) + except exception.NotFound: + return default + + def _get_key(self, key): + return self._store.get(key) + + def _set_key(self, key, value, lock=None): + self._store.set(key, value, lock) + + def _delete_key(self, key): + return self._store.delete(key) + + def get_token(self, token_id): + ptk = self._prefix_token_id(token_id) + try: + token_ref = self._get_key(ptk) + except exception.NotFound: + raise exception.TokenNotFound(token_id=token_id) + + return token_ref + + def create_token(self, token_id, data): + """Create a token by id and data. + + It is assumed the caller has performed data validation on the "data" + parameter. 
+ """ + data_copy = copy.deepcopy(data) + ptk = self._prefix_token_id(token_id) + if not data_copy.get('expires'): + data_copy['expires'] = provider.default_expire_time() + if not data_copy.get('user_id'): + data_copy['user_id'] = data_copy['user']['id'] + + # NOTE(morganfainberg): for ease of manipulating the data without + # concern about the backend, always store the value(s) in the + # index as the isotime (string) version so this is where the string is + # built. + expires_str = timeutils.isotime(data_copy['expires'], subsecond=True) + + self._set_key(ptk, data_copy) + user_id = data['user']['id'] + user_key = self._prefix_user_id(user_id) + self._update_user_token_list(user_key, token_id, expires_str) + if CONF.trust.enabled and data.get('trust_id'): + # NOTE(morganfainberg): If trusts are enabled and this is a trust + # scoped token, we add the token to the trustee list as well. This + # allows password changes of the trustee to also expire the token. + # There is no harm in placing the token in multiple lists, as + # _list_tokens is smart enough to handle almost any case of + # valid/invalid/expired for a given token. + token_data = data_copy['token_data'] + if data_copy['token_version'] == token.provider.V2: + trustee_user_id = token_data['access']['trust'][ + 'trustee_user_id'] + elif data_copy['token_version'] == token.provider.V3: + trustee_user_id = token_data['OS-TRUST:trust'][ + 'trustee_user_id'] + else: + raise token.provider.UnsupportedTokenVersionException( + _('Unknown token version %s') % + data_copy.get('token_version')) + + trustee_key = self._prefix_user_id(trustee_user_id) + self._update_user_token_list(trustee_key, token_id, expires_str) + + return data_copy + + def _get_user_token_list_with_expiry(self, user_key): + """Return a list of tuples in the format (token_id, token_expiry) for + the user_key. 
+ """ + return self._get_key_or_default(user_key, default=[]) + + def _get_user_token_list(self, user_key): + """Return a list of token_ids for the user_key.""" + token_list = self._get_user_token_list_with_expiry(user_key) + # Each element is a tuple of (token_id, token_expiry). Most code does + # not care about the expiry, it is stripped out and only a + # list of token_ids are returned. + return [t[0] for t in token_list] + + def _update_user_token_list(self, user_key, token_id, expires_isotime_str): + current_time = self._get_current_time() + revoked_token_list = set([t['id'] for t in + self.list_revoked_tokens()]) + + with self._store.get_lock(user_key) as lock: + filtered_list = [] + token_list = self._get_user_token_list_with_expiry(user_key) + for item in token_list: + try: + item_id, expires = self._format_token_index_item(item) + except (ValueError, TypeError): + # NOTE(morganfainberg): Skip on expected errors + # possibilities from the `_format_token_index_item` method. + continue + + if expires < current_time: + LOG.debug(('Token `%(token_id)s` is expired, removing ' + 'from `%(user_key)s`.'), + {'token_id': item_id, 'user_key': user_key}) + continue + + if item_id in revoked_token_list: + # NOTE(morganfainberg): If the token has been revoked, it + # can safely be removed from this list. This helps to keep + # the user_token_list as reasonably small as possible. 
+ LOG.debug(('Token `%(token_id)s` is revoked, removing ' + 'from `%(user_key)s`.'), + {'token_id': item_id, 'user_key': user_key}) + continue + filtered_list.append(item) + filtered_list.append((token_id, expires_isotime_str)) + self._set_key(user_key, filtered_list, lock) + return filtered_list + + def _get_current_time(self): + return timeutils.normalize_time(timeutils.utcnow()) + + def _add_to_revocation_list(self, data, lock): + filtered_list = [] + revoked_token_data = {} + + current_time = self._get_current_time() + expires = data['expires'] + + if isinstance(expires, six.string_types): + expires = timeutils.parse_isotime(expires) + + expires = timeutils.normalize_time(expires) + + if expires < current_time: + LOG.warning(_('Token `%s` is expired, not adding to the ' + 'revocation list.'), data['id']) + return + + revoked_token_data['expires'] = timeutils.isotime(expires, + subsecond=True) + revoked_token_data['id'] = data['id'] + + token_list = self._get_key_or_default(self.revocation_key, default=[]) + if not isinstance(token_list, list): + # NOTE(morganfainberg): In the case that the revocation list is not + # in a format we understand, reinitialize it. This is an attempt to + # not allow the revocation list to be completely broken if + # somehow the key is changed outside of keystone (e.g. memcache + # that is shared by multiple applications). Logging occurs at error + # level so that the cloud administrators have some awareness that + # the revocation_list needed to be cleared out. In all, this should + # be recoverable. Keystone cannot control external applications + # from changing a key in some backends, however, it is possible to + # gracefully handle and notify of this event. + LOG.error(_('Reinitializing revocation list due to error ' + 'in loading revocation list from backend. ' + 'Expected `list` type got `%(type)s`. 
Old ' + 'revocation list data: %(list)r'), + {'type': type(token_list), 'list': token_list}) + token_list = [] + + # NOTE(morganfainberg): on revocation, cleanup the expired entries, try + # to keep the list of tokens revoked at the minimum. + for token_data in token_list: + try: + expires_at = timeutils.normalize_time( + timeutils.parse_isotime(token_data['expires'])) + except ValueError: + LOG.warning(_('Removing `%s` from revocation list due to ' + 'invalid expires data in revocation list.'), + token_data.get('id', 'INVALID_TOKEN_DATA')) + continue + if expires_at > current_time: + filtered_list.append(token_data) + filtered_list.append(revoked_token_data) + self._set_key(self.revocation_key, filtered_list, lock) + + def delete_token(self, token_id): + # Test for existence + with self._store.get_lock(self.revocation_key) as lock: + data = self.get_token(token_id) + ptk = self._prefix_token_id(token_id) + result = self._delete_key(ptk) + self._add_to_revocation_list(data, lock) + return result + + def delete_tokens(self, user_id, tenant_id=None, trust_id=None, + consumer_id=None): + return super(Token, self).delete_tokens( + user_id=user_id, + tenant_id=tenant_id, + trust_id=trust_id, + consumer_id=consumer_id, + ) + + def _format_token_index_item(self, item): + try: + token_id, expires = item + except (TypeError, ValueError): + LOG.debug(('Invalid token entry expected tuple of ' + '`(<token_id>, <expires>)` got: `%(item)r`'), + dict(item=item)) + raise + + try: + expires = timeutils.normalize_time( + timeutils.parse_isotime(expires)) + except ValueError: + LOG.debug(('Invalid expires time on token `%(token_id)s`:' + ' %(expires)r'), + dict(token_id=token_id, expires=expires)) + raise + return token_id, expires + + def _token_match_tenant(self, token_ref, tenant_id): + if token_ref.get('tenant'): + return token_ref['tenant'].get('id') == tenant_id + return False + + def _token_match_trust(self, token_ref, trust_id): + if not token_ref.get('trust_id'): + return False + return
token_ref['trust_id'] == trust_id + + def _token_match_consumer(self, token_ref, consumer_id): + try: + oauth = token_ref['token_data']['token']['OS-OAUTH1'] + return oauth.get('consumer_id') == consumer_id + except KeyError: + return False + + def _list_tokens(self, user_id, tenant_id=None, trust_id=None, + consumer_id=None): + # This function is used to generate the list of tokens that should be + # revoked when revoking by token identifiers. This approach will be + # deprecated soon, probably in the Juno release. Setting revoke_by_id + # to False indicates that this kind of recording should not be + # performed. In order to test the revocation events, tokens shouldn't + # be deleted from the backends. This check ensures that tokens are + # still recorded. + if not CONF.token.revoke_by_id: + return [] + tokens = [] + user_key = self._prefix_user_id(user_id) + token_list = self._get_user_token_list_with_expiry(user_key) + current_time = self._get_current_time() + for item in token_list: + try: + token_id, expires = self._format_token_index_item(item) + except (TypeError, ValueError): + # NOTE(morganfainberg): Skip on expected error possibilities + # from the `_format_token_index_item` method. + continue + + if expires < current_time: + continue + + try: + token_ref = self.get_token(token_id) + except exception.TokenNotFound: + # NOTE(morganfainberg): Token doesn't exist, skip it. 
+ continue + if token_ref: + if tenant_id is not None: + if not self._token_match_tenant(token_ref, tenant_id): + continue + if trust_id is not None: + if not self._token_match_trust(token_ref, trust_id): + continue + if consumer_id is not None: + if not self._token_match_consumer(token_ref, consumer_id): + continue + + tokens.append(token_id) + return tokens + + def list_revoked_tokens(self): + revoked_token_list = self._get_key_or_default(self.revocation_key, + default=[]) + if isinstance(revoked_token_list, list): + return revoked_token_list + return [] + + def flush_expired_tokens(self): + """Archive or delete tokens that have expired.""" + raise exception.NotImplemented() diff --git a/keystone/token/persistence/backends/memcache.py b/keystone/token/persistence/backends/memcache.py new file mode 100644 index 0000000000..01d9f0674a --- /dev/null +++ b/keystone/token/persistence/backends/memcache.py @@ -0,0 +1,30 @@ +# Copyright 2013 Metacloud, Inc. +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from keystone.common import config +from keystone.token.persistence.backends import kvs + + +CONF = config.CONF + + +class Token(kvs.Token): + kvs_backend = 'openstack.kvs.Memcached' + + def __init__(self, *args, **kwargs): + kwargs['no_expiry_keys'] = [self.revocation_key] + kwargs['memcached_expire_time'] = CONF.token.expiration + kwargs['url'] = CONF.memcache.servers + super(Token, self).__init__(*args, **kwargs) diff --git a/keystone/token/persistence/backends/sql.py b/keystone/token/persistence/backends/sql.py new file mode 100644 index 0000000000..c37a24ed12 --- /dev/null +++ b/keystone/token/persistence/backends/sql.py @@ -0,0 +1,232 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +from keystone.common import sql +from keystone import config +from keystone import exception +from keystone.openstack.common import timeutils +from keystone import token +from keystone.token import provider + + +CONF = config.CONF + + +class TokenModel(sql.ModelBase, sql.DictBase): + __tablename__ = 'token' + attributes = ['id', 'expires', 'user_id', 'trust_id'] + id = sql.Column(sql.String(64), primary_key=True) + expires = sql.Column(sql.DateTime(), default=None) + extra = sql.Column(sql.JsonBlob()) + valid = sql.Column(sql.Boolean(), default=True, nullable=False) + user_id = sql.Column(sql.String(64)) + trust_id = sql.Column(sql.String(64)) + __table_args__ = ( + sql.Index('ix_token_expires', 'expires'), + sql.Index('ix_token_expires_valid', 'expires', 'valid') + ) + + +class Token(token.persistence.Driver): + # Public interface + def get_token(self, token_id): + if token_id is None: + raise exception.TokenNotFound(token_id=token_id) + session = sql.get_session() + token_ref = session.query(TokenModel).get(token_id) + if not token_ref or not token_ref.valid: + raise exception.TokenNotFound(token_id=token_id) + return token_ref.to_dict() + + def create_token(self, token_id, data): + data_copy = copy.deepcopy(data) + if not data_copy.get('expires'): + data_copy['expires'] = provider.default_expire_time() + if not data_copy.get('user_id'): + data_copy['user_id'] = data_copy['user']['id'] + + token_ref = TokenModel.from_dict(data_copy) + token_ref.valid = True + session = sql.get_session() + with session.begin(): + session.add(token_ref) + return token_ref.to_dict() + + def delete_token(self, token_id): + session = sql.get_session() + with session.begin(): + token_ref = session.query(TokenModel).get(token_id) + if not token_ref or not token_ref.valid: + raise exception.TokenNotFound(token_id=token_id) + token_ref.valid = False + + def delete_tokens(self, user_id, tenant_id=None, trust_id=None, + consumer_id=None): + """Deletes all tokens in one 
session + + The user_id will be ignored if the trust_id is specified. user_id + will always be specified. + If using a trust, the token's user_id is set to the trustee's user ID + or the trustor's user ID, so will use trust_id to query the tokens. + + """ + session = sql.get_session() + with session.begin(): + now = timeutils.utcnow() + query = session.query(TokenModel) + query = query.filter_by(valid=True) + query = query.filter(TokenModel.expires > now) + if trust_id: + query = query.filter(TokenModel.trust_id == trust_id) + else: + query = query.filter(TokenModel.user_id == user_id) + + for token_ref in query.all(): + if tenant_id: + token_ref_dict = token_ref.to_dict() + if not self._tenant_matches(tenant_id, token_ref_dict): + continue + if consumer_id: + token_ref_dict = token_ref.to_dict() + if not self._consumer_matches(consumer_id, token_ref_dict): + continue + + token_ref.valid = False + + def _tenant_matches(self, tenant_id, token_ref_dict): + return ((tenant_id is None) or + (token_ref_dict.get('tenant') and + token_ref_dict['tenant'].get('id') == tenant_id)) + + def _consumer_matches(self, consumer_id, ref): + if consumer_id is None: + return True + else: + try: + oauth = ref['token_data']['token'].get('OS-OAUTH1', {}) + return oauth and oauth['consumer_id'] == consumer_id + except KeyError: + return False + + def _list_tokens_for_trust(self, trust_id): + session = sql.get_session() + tokens = [] + now = timeutils.utcnow() + query = session.query(TokenModel) + query = query.filter(TokenModel.expires > now) + query = query.filter(TokenModel.trust_id == trust_id) + + token_references = query.filter_by(valid=True) + for token_ref in token_references: + token_ref_dict = token_ref.to_dict() + tokens.append(token_ref_dict['id']) + return tokens + + def _list_tokens_for_user(self, user_id, tenant_id=None): + session = sql.get_session() + tokens = [] + now = timeutils.utcnow() + query = session.query(TokenModel) + query = query.filter(TokenModel.expires > now) 
+ query = query.filter(TokenModel.user_id == user_id) + + token_references = query.filter_by(valid=True) + for token_ref in token_references: + token_ref_dict = token_ref.to_dict() + if self._tenant_matches(tenant_id, token_ref_dict): + tokens.append(token_ref['id']) + return tokens + + def _list_tokens_for_consumer(self, user_id, consumer_id): + tokens = [] + session = sql.get_session() + with session.begin(): + now = timeutils.utcnow() + query = session.query(TokenModel) + query = query.filter(TokenModel.expires > now) + query = query.filter(TokenModel.user_id == user_id) + token_references = query.filter_by(valid=True) + + for token_ref in token_references: + token_ref_dict = token_ref.to_dict() + if self._consumer_matches(consumer_id, token_ref_dict): + tokens.append(token_ref_dict['id']) + return tokens + + def _list_tokens(self, user_id, tenant_id=None, trust_id=None, + consumer_id=None): + if not CONF.token.revoke_by_id: + return [] + if trust_id: + return self._list_tokens_for_trust(trust_id) + if consumer_id: + return self._list_tokens_for_consumer(user_id, consumer_id) + else: + return self._list_tokens_for_user(user_id, tenant_id) + + def list_revoked_tokens(self): + session = sql.get_session() + tokens = [] + now = timeutils.utcnow() + query = session.query(TokenModel.id, TokenModel.expires) + query = query.filter(TokenModel.expires > now) + token_references = query.filter_by(valid=False) + for token_ref in token_references: + record = { + 'id': token_ref[0], + 'expires': token_ref[1], + } + tokens.append(record) + return tokens + + def token_flush_batch_size(self, dialect): + batch_size = 0 + if dialect == 'ibm_db_sa': + # This functionality is limited to DB2, because + # it is necessary to prevent the transaction log + # from filling up, whereas at least some of the + # other supported databases do not support update + # queries with LIMIT subqueries nor do they appear + # to require the use of such queries when deleting + # large numbers of records at
once. + batch_size = 100 + # Limit of 100 is known to not fill a transaction log + # of default maximum size while not significantly + # impacting the performance of large token purges on + # systems where the maximum transaction log size has + # been increased beyond the default. + return batch_size + + def flush_expired_tokens(self): + session = sql.get_session() + dialect = session.bind.dialect.name + batch_size = self.token_flush_batch_size(dialect) + if batch_size > 0: + query = session.query(TokenModel.id) + query = query.filter(TokenModel.expires < timeutils.utcnow()) + query = query.limit(batch_size).subquery() + delete_query = (session.query(TokenModel). + filter(TokenModel.id.in_(query))) + while True: + rowcount = delete_query.delete(synchronize_session=False) + if rowcount == 0: + break + else: + query = session.query(TokenModel) + query = query.filter(TokenModel.expires < timeutils.utcnow()) + query.delete(synchronize_session=False) + + session.flush() diff --git a/keystone/token/persistence/core.py b/keystone/token/persistence/core.py new file mode 100644 index 0000000000..c20093fa00 --- /dev/null +++ b/keystone/token/persistence/core.py @@ -0,0 +1,337 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Main entry point into the Token persistence service.""" + +import abc +import copy + +import six + +from keystone.common import cache +from keystone.common import dependency +from keystone.common import manager +from keystone import config +from keystone import exception +from keystone.openstack.common import log +from keystone.openstack.common import timeutils +from keystone.openstack.common import versionutils + + +CONF = config.CONF +LOG = log.getLogger(__name__) +SHOULD_CACHE = cache.should_cache_fn('token') + +# NOTE(blk-u): The config options are not available at import time. +EXPIRATION_TIME = lambda: CONF.token.cache_time +REVOCATION_CACHE_EXPIRATION_TIME = lambda: CONF.token.revocation_cache_time + + +@dependency.requires('assignment_api', 'identity_api', 'token_provider_api', + 'trust_api') +@dependency.provider('token_api') +class Manager(manager.Manager): + """Default pivot point for the Token backend. + + See :mod:`keystone.common.manager.Manager` for more details on how this + dynamically calls the backend. + + """ + + def __init__(self): + super(Manager, self).__init__(CONF.token.driver) + + @versionutils.deprecated(as_of=versionutils.deprecated.JUNO, + in_favor_of='token_provider_api.unique_id', + remove_in=+1, + what='token_api.unique_id') + def unique_id(self, token_id): + return self.token_provider_api.unique_id(token_id) + + def _assert_valid(self, token_id, token_ref): + """Raise TokenNotFound if the token is expired.""" + current_time = timeutils.normalize_time(timeutils.utcnow()) + expires = token_ref.get('expires') + if not expires or current_time > timeutils.normalize_time(expires): + raise exception.TokenNotFound(token_id=token_id) + + def get_token(self, token_id): + if not token_id: + # NOTE(morganfainberg): There are cases when the + # context['token_id'] will in-fact be None. This also saves + # a round-trip to the backend if we don't have a token_id. 
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
                    expiration_time=EXPIRATION_TIME)
def _get_token(self, token_id):
    """Fetch a token from the driver, memoized on the token id.

    Callers are expected to pass the "unique" form of the id (see
    get_token, which converts via token_provider_api.unique_id before
    calling here) so the cache is only ever keyed on that form.
    """
    # Only ever use the "unique" id in the cache key.
    return self.driver.get_token(token_id)
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                  consumer_id=None):
    """Delete matching tokens for a user and drop them from the cache."""
    if not CONF.token.revoke_by_id:
        # Revocation-by-id is disabled; there is nothing to delete here.
        return
    # Snapshot the ids before the driver removes the backing records,
    # so every deleted token can be evicted from the cache afterwards.
    doomed_ids = self.driver._list_tokens(user_id, tenant_id, trust_id,
                                          consumer_id)
    self.driver.delete_tokens(user_id, tenant_id, trust_id, consumer_id)
    for doomed_id in doomed_ids:
        self._invalidate_individual_token_cache(
            self.token_provider_api.unique_id(doomed_id))
    # The cached revocation list is now stale as well.
    self.invalidate_revocation_list()
+ """ + if not CONF.token.revoke_by_id: + return + projects = self.assignment_api.list_projects() + for project in projects: + if project['domain_id'] == domain_id: + for user_id in self.assignment_api.list_user_ids_for_project( + project['id']): + self.delete_tokens_for_user(user_id, project['id']) + # TODO(morganfainberg): implement deletion of domain_scoped tokens. + + users = self.identity_api.list_users(domain_id) + user_ids = (user['id'] for user in users) + self.delete_tokens_for_users(user_ids) + + def delete_tokens_for_user(self, user_id, project_id=None): + """Delete all tokens for a given user or user-project combination. + + This method adds in the extra logic for handling trust-scoped token + revocations in a single call instead of needing to explicitly handle + trusts in the caller's logic. + """ + if not CONF.token.revoke_by_id: + return + self.delete_tokens(user_id, tenant_id=project_id) + for trust in self.trust_api.list_trusts_for_trustee(user_id): + # Ensure we revoke tokens associated to the trust / project + # user_id combination. + self.delete_tokens(user_id, trust_id=trust['id'], + tenant_id=project_id) + for trust in self.trust_api.list_trusts_for_trustor(user_id): + # Ensure we revoke tokens associated to the trust / project / + # user_id combination where the user_id is the trustor. + + # NOTE(morganfainberg): This revocation is a bit coarse, but it + # covers a number of cases such as disabling of the trustor user, + # deletion of the trustor user (for any number of reasons). It + # might make sense to refine this and be more surgical on the + # deletions (e.g. don't revoke tokens for the trusts when the + # trustor changes password). For now, to maintain previous + # functionality, this will continue to be a bit overzealous on + # revocations. 
+ self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'], + tenant_id=project_id) + + def delete_tokens_for_users(self, user_ids, project_id=None): + """Delete all tokens for a list of user_ids. + + :param user_ids: list of user identifiers + :param project_id: optional project identifier + """ + if not CONF.token.revoke_by_id: + return + for user_id in user_ids: + self.delete_tokens_for_user(user_id, project_id=project_id) + + def _invalidate_individual_token_cache(self, token_id): + # NOTE(morganfainberg): invalidate takes the exact same arguments as + # the normal method, this means we need to pass "self" in (which gets + # stripped off). + + # FIXME(morganfainberg): Does this cache actually need to be + # invalidated? We maintain a cached revocation list, which should be + # consulted before accepting a token as valid. For now we will + # do the explicit individual token invalidation. + self._get_token.invalidate(self, token_id) + self.token_provider_api.invalidate_individual_token_cache(token_id) + + +@six.add_metaclass(abc.ABCMeta) +class Driver(object): + """Interface description for a Token driver.""" + + @abc.abstractmethod + def get_token(self, token_id): + """Get a token by id. + + :param token_id: identity of the token + :type token_id: string + :returns: token_ref + :raises: keystone.exception.TokenNotFound + + """ + raise exception.NotImplemented() # pragma: no cover + + @abc.abstractmethod + def create_token(self, token_id, data): + """Create a token by id and data. + + :param token_id: identity of the token + :type token_id: string + :param data: dictionary with additional reference information + + :: + + { + expires='' + id=token_id, + user=user_ref, + tenant=tenant_ref, + metadata=metadata_ref + } + + :type data: dict + :returns: token_ref or None. + + """ + raise exception.NotImplemented() # pragma: no cover + + @abc.abstractmethod + def delete_token(self, token_id): + """Deletes a token by id. 
@abc.abstractmethod
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                  consumer_id=None):
    """Delete tokens matching a user (default best-effort implementation).

    If the tenant_id is not None, only delete the tokens by user id under
    the specified tenant.

    If the trust_id is not None, it will be used to query tokens and the
    user_id will be ignored.

    If the consumer_id is not None, only delete the tokens by consumer id
    that match the specified consumer id.

    :param user_id: identity of user
    :type user_id: string
    :param tenant_id: identity of the tenant
    :type tenant_id: string
    :param trust_id: identity of the trust
    :type trust_id: string
    :param consumer_id: identity of the consumer
    :type consumer_id: string
    :returns: None.
    :raises: keystone.exception.TokenNotFound

    """
    if not CONF.token.revoke_by_id:
        # Revocation-by-id disabled: deleting individual rows is moot.
        return
    # Delete each matching token one-by-one; a concurrently removed
    # token is not an error for this bulk operation.
    for doomed_id in self._list_tokens(user_id,
                                       tenant_id=tenant_id,
                                       trust_id=trust_id,
                                       consumer_id=consumer_id):
        try:
            self.delete_token(doomed_id)
        except exception.NotFound:
            pass
@abc.abstractmethod
def list_revoked_tokens(self):
    """Returns a list of all revoked tokens

    :returns: list of token_id's

    """
    raise exception.NotImplemented()  # pragma: no cover

@abc.abstractmethod
def flush_expired_tokens(self):
    """Archive or delete tokens that have expired.

    Concrete drivers choose whether expired entries are removed or
    archived; callers only require that expired tokens stop being
    served.
    """
    raise exception.NotImplemented()  # pragma: no cover