Merge "switch to oslo.cache"
commit 8a6c2846d0
@@ -3,6 +3,7 @@ output_file = etc/keystone.conf.sample
 wrap_width = 79
 namespace = keystone
 namespace = keystone.notifications
+namespace = oslo.cache
 namespace = oslo.log
 namespace = oslo.messaging
 namespace = oslo.policy
@@ -33,7 +33,7 @@ from keystone import notifications

 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='role')
+MEMOIZE = cache.get_memoization_decorator(group='role')


 @dependency.provider('assignment_api')
@@ -35,7 +35,7 @@ from keystone import notifications

 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='catalog')
+MEMOIZE = cache.get_memoization_decorator(group='catalog')
 WHITELISTED_PROPERTIES = [
     'tenant_id', 'user_id', 'public_bind_host', 'admin_bind_host',
     'compute_host', 'admin_port', 'public_port',
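The two hunks above only rename the decorator-factory argument from section= to group=; the decorated call sites are untouched. A minimal sketch of that call-site pattern, with an illustrative manager method that is not taken from this commit:

    from keystone.common import cache

    MEMOIZE = cache.get_memoization_decorator(group='role')


    class Manager(object):
        @MEMOIZE
        def get_role(self, role_id):
            # Runs only on a cache miss; hits are served from the configured
            # dogpile region, subject to the [role] caching/cache_time options.
            return self.driver.get_role(role_id)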
keystone/common/cache/backends/memcache_pool.py (53 lines changed)
@@ -13,49 +13,16 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""dogpile.cache backend that uses Memcached connection pool"""
-
-import functools
-import logging
-
-from dogpile.cache.backends import memcached as memcached_backend
-
-from keystone.common.cache import _memcache_pool
-
-
-LOG = logging.getLogger(__name__)
-
-
-# Helper to ease backend refactoring
-class ClientProxy(object):
-    def __init__(self, client_pool):
-        self.client_pool = client_pool
-
-    def _run_method(self, __name, *args, **kwargs):
-        with self.client_pool.acquire() as client:
-            return getattr(client, __name)(*args, **kwargs)
-
-    def __getattr__(self, name):
-        return functools.partial(self._run_method, name)
-
-
-class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
-    # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__
-    def __init__(self, arguments):
-        super(PooledMemcachedBackend, self).__init__(arguments)
-        self.client_pool = _memcache_pool.MemcacheClientPool(
-            self.url,
-            arguments={
-                'dead_retry': arguments.get('dead_retry', 5 * 60),
-                'socket_timeout': arguments.get('socket_timeout', 3),
-            },
-            maxsize=arguments.get('pool_maxsize', 10),
-            unused_timeout=arguments.get('pool_unused_timeout', 60),
-            conn_get_timeout=arguments.get('pool_connection_get_timeout', 10),
-        )
-
-    # Since all methods in backend just call one of methods of client, this
-    # lets us avoid need to hack it too much
-    @property
-    def client(self):
-        return ClientProxy(self.client_pool)
+"""This module is deprecated."""
+
+from oslo_cache.backends import memcache_pool
+from oslo_log import versionutils
+
+
+@versionutils.deprecated(
+    versionutils.deprecated.LIBERTY,
+    what='keystone.cache.memcache_pool backend',
+    in_favor_of='oslo_cache.memcache_pool backend',
+    remove_in=+1)
+class PooledMemcachedBackend(memcache_pool.PooledMemcachedBackend):
+    pass
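With the implementation moved into oslo.cache, keystone keeps only this deprecated subclass so configurations that still name the old backend keep working (the legacy backend names remain registered in keystone.common.cache.core, visible later in this diff). A rough sketch of that compatibility path; the standalone wiring and memcache URL here are illustrative, not keystone code:

    from dogpile.cache import make_region

    # Importing the cache core module registers the legacy backend names
    # such as 'keystone.cache.memcache_pool'.
    from keystone.common.cache import core  # noqa

    region = make_region().configure(
        'keystone.cache.memcache_pool',          # old name still resolves
        arguments={'url': ['127.0.0.1:11211']})  # placeholder server; first use
                                                 # logs the Liberty deprecation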
keystone/common/cache/backends/mongo.py (554 lines changed)
@@ -12,550 +12,14 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import abc
-import datetime
-
-from dogpile.cache import api
-from dogpile.cache import util as dp_util
-from oslo_log import log
-from oslo_utils import importutils
-from oslo_utils import timeutils
-import six
-
-from keystone import exception
-from keystone.i18n import _, _LW
-
-
-NO_VALUE = api.NO_VALUE
-LOG = log.getLogger(__name__)
-
-
-class MongoCacheBackend(api.CacheBackend):
-    """A MongoDB based caching backend implementing dogpile backend APIs.
-
-    Arguments accepted in the arguments dictionary:
-
-    :param db_hosts: string (required), hostname or IP address of the
-        MongoDB server instance. This can be a single MongoDB connection URI,
-        or a list of MongoDB connection URIs.
-
-    :param db_name: string (required), the name of the database to be used.
-
-    :param cache_collection: string (required), the name of collection to store
-        cached data.
-        *Note:* Different collection name can be provided if there is need to
-        create separate container (i.e. collection) for cache data. So region
-        configuration is done per collection.
-
-    Following are optional parameters for MongoDB backend configuration,
-
-    :param username: string, the name of the user to authenticate.
-
-    :param password: string, the password of the user to authenticate.
-
-    :param max_pool_size: integer, the maximum number of connections that the
-        pool will open simultaneously. By default the pool size is 10.
-
-    :param w: integer, write acknowledgement for MongoDB client
-
-        If not provided, then no default is set on MongoDB and then write
-        acknowledgement behavior occurs as per MongoDB default. This parameter
-        name is same as what is used in MongoDB docs. This value is specified
-        at collection level so its applicable to `cache_collection` db write
-        operations.
-
-        If this is a replica set, write operations will block until they have
-        been replicated to the specified number or tagged set of servers.
-        Setting w=0 disables write acknowledgement and all other write concern
-        options.
-
-    :param read_preference: string, the read preference mode for MongoDB client
-        Expected value is ``primary``, ``primaryPreferred``, ``secondary``,
-        ``secondaryPreferred``, or ``nearest``. This read_preference is
-        specified at collection level so its applicable to `cache_collection`
-        db read operations.
-
-    :param use_replica: boolean, flag to indicate if replica client to be
-        used. Default is `False`. `replicaset_name` value is required if
-        `True`.
-
-    :param replicaset_name: string, name of replica set.
-        Becomes required if `use_replica` is `True`
-
-    :param son_manipulator: string, name of class with module name which
-        implements MongoDB SONManipulator.
-        Default manipulator used is :class:`.BaseTransform`.
-
-        This manipulator is added per database. In multiple cache
-        configurations, the manipulator name should be same if same
-        database name ``db_name`` is used in those configurations.
-
-        SONManipulator is used to manipulate custom data types as they are
-        saved or retrieved from MongoDB. Custom impl is only needed if cached
-        data is custom class and needs transformations when saving or reading
-        from db. If dogpile cached value contains built-in data types, then
-        BaseTransform class is sufficient as it already handles dogpile
-        CachedValue class transformation.
-
-    :param mongo_ttl_seconds: integer, interval in seconds to indicate maximum
-        time-to-live value.
-        If value is greater than 0, then its assumed that cache_collection
-        needs to be TTL type (has index at 'doc_date' field).
-        By default, the value is -1 and its disabled.
-        Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
-
-    .. NOTE::
-
-        This parameter is different from Dogpile own
-        expiration_time, which is the number of seconds after which Dogpile
-        will consider the value to be expired. When Dogpile considers a
-        value to be expired, it continues to use the value until generation
-        of a new value is complete, when using CacheRegion.get_or_create().
-        Therefore, if you are setting `mongo_ttl_seconds`, you will want to
-        make sure it is greater than expiration_time by at least enough
-        seconds for new values to be generated, else the value would not
-        be available during a regeneration, forcing all threads to wait for
-        a regeneration each time a value expires.
-
-    :param ssl: boolean, If True, create the connection to the server
-        using SSL. Default is `False`. Client SSL connection parameters depends
-        on server side SSL setup. For further reference on SSL configuration:
-        <http://docs.mongodb.org/manual/tutorial/configure-ssl/>
-
-    :param ssl_keyfile: string, the private keyfile used to identify the
-        local connection against mongod. If included with the certfile then
-        only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
-
-    :param ssl_certfile: string, the certificate file used to identify the
-        local connection against mongod. Used only when `ssl` is `True`.
-
-    :param ssl_ca_certs: string, the ca_certs file contains a set of
-        concatenated 'certification authority' certificates, which are used to
-        validate certificates passed from the other end of the connection.
-        Used only when `ssl` is `True`.
-
-    :param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
-        a certificate is required from the other side of the connection, and
-        whether it will be validated if provided. It must be one of the three
-        values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
-        (not required, but validated if provided), or
-        ``ssl.CERT_REQUIRED`` (required and validated). If the value of this
-        parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
-        must point to a file of CA certificates. Used only when `ssl`
-        is `True`.
-
-    Rest of arguments are passed to mongo calls for read, write and remove.
-    So related options can be specified to pass to these operations.
-
-    Further details of various supported arguments can be referred from
-    <http://api.mongodb.org/python/current/api/pymongo/>
-
-    """
-
-    def __init__(self, arguments):
-        self.api = MongoApi(arguments)
-
-    @dp_util.memoized_property
-    def client(self):
-        """Initializes MongoDB connection and collection defaults.
-
-        This initialization is done only once and performed as part of lazy
-        inclusion of MongoDB dependency i.e. add imports only if related
-        backend is used.
-
-        :return: :class:`.MongoApi` instance
-        """
-        self.api.get_cache_collection()
-        return self.api
-
-    def get(self, key):
-        value = self.client.get(key)
-        if value is None:
-            return NO_VALUE
-        else:
-            return value
-
-    def get_multi(self, keys):
-        values = self.client.get_multi(keys)
-        return [
-            NO_VALUE if key not in values
-            else values[key] for key in keys
-        ]
-
-    def set(self, key, value):
-        self.client.set(key, value)
-
-    def set_multi(self, mapping):
-        self.client.set_multi(mapping)
-
-    def delete(self, key):
-        self.client.delete(key)
-
-    def delete_multi(self, keys):
-        self.client.delete_multi(keys)
-
-
-class MongoApi(object):
-    """Class handling MongoDB specific functionality.
-
-    This class uses PyMongo APIs internally to create database connection
-    with configured pool size, ensures unique index on key, does database
-    authentication and ensure TTL collection index if configured so.
-    This class also serves as handle to cache collection for dogpile cache
-    APIs.
-
-    In a single deployment, multiple cache configuration can be defined. In
-    that case of multiple cache collections usage, db client connection pool
-    is shared when cache collections are within same database.
-    """
-
-    # class level attributes for re-use of db client connection and collection
-    _DB = {}  # dict of db_name: db connection reference
-    _MONGO_COLLS = {}  # dict of cache_collection : db collection reference
-
-    def __init__(self, arguments):
-        self._init_args(arguments)
-        self._data_manipulator = None
-
-    def _init_args(self, arguments):
-        """Helper logic for collecting and parsing MongoDB specific arguments.
-
-        The arguments passed in are separated out in connection specific
-        setting and rest of arguments are passed to create/update/delete
-        db operations.
-        """
-        self.conn_kwargs = {}  # connection specific arguments
-
-        self.hosts = arguments.pop('db_hosts', None)
-        if self.hosts is None:
-            msg = _('db_hosts value is required')
-            raise exception.ValidationError(message=msg)
-
-        self.db_name = arguments.pop('db_name', None)
-        if self.db_name is None:
-            msg = _('database db_name is required')
-            raise exception.ValidationError(message=msg)
-
-        self.cache_collection = arguments.pop('cache_collection', None)
-        if self.cache_collection is None:
-            msg = _('cache_collection name is required')
-            raise exception.ValidationError(message=msg)
-
-        self.username = arguments.pop('username', None)
-        self.password = arguments.pop('password', None)
-        self.max_pool_size = arguments.pop('max_pool_size', 10)
-
-        self.w = arguments.pop('w', -1)
-        try:
-            self.w = int(self.w)
-        except ValueError:
-            msg = _('integer value expected for w (write concern attribute)')
-            raise exception.ValidationError(message=msg)
-
-        self.read_preference = arguments.pop('read_preference', None)
-
-        self.use_replica = arguments.pop('use_replica', False)
-        if self.use_replica:
-            if arguments.get('replicaset_name') is None:
-                msg = _('replicaset_name required when use_replica is True')
-                raise exception.ValidationError(message=msg)
-            self.replicaset_name = arguments.get('replicaset_name')
-
-        self.son_manipulator = arguments.pop('son_manipulator', None)
-
-        # set if mongo collection needs to be TTL type.
-        # This needs to be max ttl for any cache entry.
-        # By default, -1 means don't use TTL collection.
-        # With ttl set, it creates related index and have doc_date field with
-        # needed expiration interval
-        self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
-        try:
-            self.ttl_seconds = int(self.ttl_seconds)
-        except ValueError:
-            msg = _('integer value expected for mongo_ttl_seconds')
-            raise exception.ValidationError(message=msg)
-
-        self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
-        if self.conn_kwargs['ssl']:
-            ssl_keyfile = arguments.pop('ssl_keyfile', None)
-            ssl_certfile = arguments.pop('ssl_certfile', None)
-            ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
-            ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
-            if ssl_keyfile:
-                self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
-            if ssl_certfile:
-                self.conn_kwargs['ssl_certfile'] = ssl_certfile
-            if ssl_ca_certs:
-                self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
-            if ssl_cert_reqs:
-                self.conn_kwargs['ssl_cert_reqs'] = (
-                    self._ssl_cert_req_type(ssl_cert_reqs))
-
-        # rest of arguments are passed to mongo crud calls
-        self.meth_kwargs = arguments
-
-    def _ssl_cert_req_type(self, req_type):
-        try:
-            import ssl
-        except ImportError:
-            raise exception.ValidationError(_('no ssl support available'))
-        req_type = req_type.upper()
-        try:
-            return {
-                'NONE': ssl.CERT_NONE,
-                'OPTIONAL': ssl.CERT_OPTIONAL,
-                'REQUIRED': ssl.CERT_REQUIRED
-            }[req_type]
-        except KeyError:
-            msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
-                    '"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
-            raise exception.ValidationError(message=msg)
-
-    def _get_db(self):
-        # defer imports until backend is used
-        global pymongo
-        import pymongo
-        if self.use_replica:
-            connection = pymongo.MongoReplicaSetClient(
-                host=self.hosts, replicaSet=self.replicaset_name,
-                max_pool_size=self.max_pool_size, **self.conn_kwargs)
-        else:  # used for standalone node or mongos in sharded setup
-            connection = pymongo.MongoClient(
-                host=self.hosts, max_pool_size=self.max_pool_size,
-                **self.conn_kwargs)
-
-        database = getattr(connection, self.db_name)
-
-        self._assign_data_mainpulator()
-        database.add_son_manipulator(self._data_manipulator)
-        if self.username and self.password:
-            database.authenticate(self.username, self.password)
-        return database
-
-    def _assign_data_mainpulator(self):
-        if self._data_manipulator is None:
-            if self.son_manipulator:
-                self._data_manipulator = importutils.import_object(
-                    self.son_manipulator)
-            else:
-                self._data_manipulator = BaseTransform()
-
-    def _get_doc_date(self):
-        if self.ttl_seconds > 0:
-            expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
-            doc_date = timeutils.utcnow() + expire_delta
-        else:
-            doc_date = timeutils.utcnow()
-        return doc_date
-
-    def get_cache_collection(self):
-        if self.cache_collection not in self._MONGO_COLLS:
-            global pymongo
-            import pymongo
-            # re-use db client connection if already defined as part of
-            # earlier dogpile cache configuration
-            if self.db_name not in self._DB:
-                self._DB[self.db_name] = self._get_db()
-            coll = getattr(self._DB[self.db_name], self.cache_collection)
-
-            self._assign_data_mainpulator()
-            if self.read_preference:
-                # pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name
-                f = getattr(pymongo.read_preferences,
-                            'read_pref_mode_from_name', None)
-                if not f:
-                    f = pymongo.read_preferences.mongos_enum
-                self.read_preference = f(self.read_preference)
-                coll.read_preference = self.read_preference
-            if self.w > -1:
-                coll.write_concern['w'] = self.w
-            if self.ttl_seconds > 0:
-                kwargs = {'expireAfterSeconds': self.ttl_seconds}
-                coll.ensure_index('doc_date', cache_for=5, **kwargs)
-            else:
-                self._validate_ttl_index(coll, self.cache_collection,
-                                         self.ttl_seconds)
-            self._MONGO_COLLS[self.cache_collection] = coll
-
-        return self._MONGO_COLLS[self.cache_collection]
-
-    def _get_cache_entry(self, key, value, meta, doc_date):
-        """MongoDB cache data representation.
-
-        Storing cache key as ``_id`` field as MongoDB by default creates
-        unique index on this field. So no need to create separate field and
-        index for storing cache key. Cache data has additional ``doc_date``
-        field for MongoDB TTL collection support.
-        """
-        return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
-
-    def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
-        """Checks if existing TTL index is removed on a collection.
-
-        This logs warning when existing collection has TTL index defined and
-        new cache configuration tries to disable index with
-        ``mongo_ttl_seconds < 0``. In that case, existing index needs
-        to be addressed first to make new configuration effective.
-        Refer to MongoDB documentation around TTL index for further details.
-        """
-        indexes = collection.index_information()
-        for indx_name, index_data in indexes.items():
-            if all(k in index_data for k in ('key', 'expireAfterSeconds')):
-                existing_value = index_data['expireAfterSeconds']
-                fld_present = 'doc_date' in index_data['key'][0]
-                if fld_present and existing_value > -1 and ttl_seconds < 1:
-                    msg = _LW('TTL index already exists on db collection '
-                              '<%(c_name)s>, remove index <%(indx_name)s> '
-                              'first to make updated mongo_ttl_seconds value '
-                              'to be effective')
-                    LOG.warn(msg, {'c_name': coll_name,
-                                   'indx_name': indx_name})
-
-    def get(self, key):
-        critieria = {'_id': key}
-        result = self.get_cache_collection().find_one(spec_or_id=critieria,
-                                                      **self.meth_kwargs)
-        if result:
-            return result['value']
-        else:
-            return None
-
-    def get_multi(self, keys):
-        db_results = self._get_results_as_dict(keys)
-        return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)}
-
-    def _get_results_as_dict(self, keys):
-        critieria = {'_id': {'$in': keys}}
-        db_results = self.get_cache_collection().find(spec=critieria,
-                                                      **self.meth_kwargs)
-        return {doc['_id']: doc for doc in db_results}
-
-    def set(self, key, value):
-        doc_date = self._get_doc_date()
-        ref = self._get_cache_entry(key, value.payload, value.metadata,
-                                    doc_date)
-        spec = {'_id': key}
-        # find and modify does not have manipulator support
-        # so need to do conversion as part of input document
-        ref = self._data_manipulator.transform_incoming(ref, self)
-        self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
-                                                    **self.meth_kwargs)
-
-    def set_multi(self, mapping):
-        """Insert multiple documents specified as key, value pairs.
-
-        In this case, multiple documents can be added via insert provided they
-        do not exist.
-        Update of multiple existing documents is done one by one
-        """
-        doc_date = self._get_doc_date()
-        insert_refs = []
-        update_refs = []
-        existing_docs = self._get_results_as_dict(list(mapping.keys()))
-        for key, value in mapping.items():
-            ref = self._get_cache_entry(key, value.payload, value.metadata,
-                                        doc_date)
-            if key in existing_docs:
-                ref['_id'] = existing_docs[key]['_id']
-                update_refs.append(ref)
-            else:
-                insert_refs.append(ref)
-        if insert_refs:
-            self.get_cache_collection().insert(insert_refs, manipulate=True,
-                                               **self.meth_kwargs)
-        for upd_doc in update_refs:
-            self.get_cache_collection().save(upd_doc, manipulate=True,
-                                             **self.meth_kwargs)
-
-    def delete(self, key):
-        critieria = {'_id': key}
-        self.get_cache_collection().remove(spec_or_id=critieria,
-                                           **self.meth_kwargs)
-
-    def delete_multi(self, keys):
-        critieria = {'_id': {'$in': keys}}
-        self.get_cache_collection().remove(spec_or_id=critieria,
-                                           **self.meth_kwargs)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AbstractManipulator(object):
-    """Abstract class with methods which need to be implemented for custom
-    manipulation.
-
-    Adding this as a base class for :class:`.BaseTransform` instead of adding
-    import dependency of pymongo specific class i.e.
-    `pymongo.son_manipulator.SONManipulator` and using that as base class.
-    This is done to avoid pymongo dependency if MongoDB backend is not used.
-    """
-    @abc.abstractmethod
-    def transform_incoming(self, son, collection):
-        """Used while saving data to MongoDB.
-
-        :param son: the SON object to be inserted into the database
-        :param collection: the collection the object is being inserted into
-
-        :returns: transformed SON object
-
-        """
-        raise exception.NotImplemented()  # pragma: no cover
-
-    @abc.abstractmethod
-    def transform_outgoing(self, son, collection):
-        """Used while reading data from MongoDB.
-
-        :param son: the SON object being retrieved from the database
-        :param collection: the collection this object was stored in
-
-        :returns: transformed SON object
-        """
-        raise exception.NotImplemented()  # pragma: no cover
-
-    def will_copy(self):
-        """Will this SON manipulator make a copy of the incoming document?
-
-        Derived classes that do need to make a copy should override this
-        method, returning `True` instead of `False`.
-
-        :returns: boolean
-        """
-        return False
-
-
-class BaseTransform(AbstractManipulator):
-    """Base transformation class to store and read dogpile cached data
-    from MongoDB.
-
-    This is needed as dogpile internally stores data as a custom class
-    i.e. dogpile.cache.api.CachedValue
-
-    Note: Custom manipulator needs to always override ``transform_incoming``
-    and ``transform_outgoing`` methods. MongoDB manipulator logic specifically
-    checks that overridden method in instance and its super are different.
-    """
-
-    def transform_incoming(self, son, collection):
-        """Used while saving data to MongoDB."""
-        for (key, value) in list(son.items()):
-            if isinstance(value, api.CachedValue):
-                son[key] = value.payload  # key is 'value' field here
-                son['meta'] = value.metadata
-            elif isinstance(value, dict):  # Make sure we recurse into sub-docs
-                son[key] = self.transform_incoming(value, collection)
-        return son
-
-    def transform_outgoing(self, son, collection):
-        """Used while reading data from MongoDB."""
-        metadata = None
-        # make sure its top level dictionary with all expected fields names
-        # present
-        if isinstance(son, dict) and all(k in son for k in
-                                         ('_id', 'value', 'meta', 'doc_date')):
-            payload = son.pop('value', None)
-            metadata = son.pop('meta', None)
-        for (key, value) in list(son.items()):
-            if isinstance(value, dict):
-                son[key] = self.transform_outgoing(value, collection)
-        if metadata is not None:
-            son['value'] = api.CachedValue(payload, metadata)
-        return son
+from oslo_cache.backends import mongo
+from oslo_log import versionutils
+
+
+@versionutils.deprecated(
+    versionutils.deprecated.LIBERTY,
+    what='keystone.cache.mongo backend',
+    in_favor_of='oslo_cache.mongo backend',
+    remove_in=+1)
+class MongoCacheBackend(mongo.MongoCacheBackend):
+    pass
keystone/common/cache/backends/noop.py (6 lines changed)
@@ -13,11 +13,17 @@
 # under the License.

 from dogpile.cache import api
+from oslo_log import versionutils


 NO_VALUE = api.NO_VALUE


+@versionutils.deprecated(
+    versionutils.deprecated.LIBERTY,
+    what='keystone.common.cache.noop backend',
+    in_favor_of="dogpile.cache's Null backend",
+    remove_in=+1)
 class NoopCacheBackend(api.CacheBackend):
     """A no op backend as a default caching backend.

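The no-op backend is deprecated in favor of dogpile.cache's built-in null backend, which behaves the same way: every get is a miss and every set is discarded. A one-line equivalent region, for illustration only:

    from dogpile.cache import make_region

    region = make_region().configure('dogpile.cache.null')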
keystone/common/cache/core.py (284 lines changed)
@@ -15,21 +15,25 @@
 """Keystone Caching Layer Implementation."""

 import dogpile.cache
-from dogpile.cache import proxy
-from dogpile.cache import util
+from oslo_cache import core as cache
 from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import importutils
-
-from keystone import exception
-from keystone.i18n import _, _LE


 CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-
-make_region = dogpile.cache.make_region
+CACHE_REGION = cache.create_region()
+
+
+def configure_cache():
+    cache.configure_cache_region(CONF, CACHE_REGION)
+
+
+def get_memoization_decorator(group, expiration_group=None):
+    return cache.get_memoization_decorator(CONF, CACHE_REGION, group,
+                                           expiration_group=expiration_group)


+# NOTE(stevemar): When memcache_pool, mongo and noop backends are removed
+# we no longer need to register the backends here.
 dogpile.cache.register_backend(
     'keystone.common.cache.noop',
     'keystone.common.cache.backends.noop',
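keystone.common.cache now exposes a single oslo.cache region plus two thin wrappers, and the rest of this commit converts callers to them. The two interaction patterns that appear in later hunks, shown together in an illustrative standalone snippet:

    from keystone.common import cache

    # At service startup (the load_backends() hunk later in this diff):
    cache.configure_cache()

    # In the cache test fixture (also later in this diff): drop the configured
    # backend so the region can be re-configured on the next setUp().
    if cache.CACHE_REGION.is_configured:
        del cache.CACHE_REGION.backend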
@@ -44,265 +48,3 @@ dogpile.cache.register_backend(
     'keystone.cache.memcache_pool',
     'keystone.common.cache.backends.memcache_pool',
     'PooledMemcachedBackend')
-
-
-class DebugProxy(proxy.ProxyBackend):
-    """Extra Logging ProxyBackend."""
-    # NOTE(morganfainberg): Pass all key/values through repr to ensure we have
-    # a clean description of the information. Without use of repr, it might
-    # be possible to run into encode/decode error(s). For logging/debugging
-    # purposes encode/decode is irrelevant and we should be looking at the
-    # data exactly as it stands.
-
-    def get(self, key):
-        value = self.proxied.get(key)
-        LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
-                  {'key': key, 'value': value})
-        return value
-
-    def get_multi(self, keys):
-        values = self.proxied.get_multi(keys)
-        LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
-                  {'keys': keys, 'values': values})
-        return values
-
-    def set(self, key, value):
-        LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
-                  {'key': key, 'value': value})
-        return self.proxied.set(key, value)
-
-    def set_multi(self, keys):
-        LOG.debug('CACHE_SET_MULTI: "%r"', keys)
-        self.proxied.set_multi(keys)
-
-    def delete(self, key):
-        self.proxied.delete(key)
-        LOG.debug('CACHE_DELETE: "%r"', key)
-
-    def delete_multi(self, keys):
-        LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
-        self.proxied.delete_multi(keys)
-
-
-def build_cache_config():
-    """Build the cache region dictionary configuration.
-
-    :returns: dict
-    """
-    prefix = CONF.cache.config_prefix
-    conf_dict = {}
-    conf_dict['%s.backend' % prefix] = CONF.cache.backend
-    conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
-    for argument in CONF.cache.backend_argument:
-        try:
-            (argname, argvalue) = argument.split(':', 1)
-        except ValueError:
-            msg = _LE('Unable to build cache config-key. Expected format '
-                      '"<argname>:<value>". Skipping unknown format: %s')
-            LOG.error(msg, argument)
-            continue
-
-        arg_key = '.'.join([prefix, 'arguments', argname])
-        conf_dict[arg_key] = argvalue
-
-    LOG.debug('Keystone Cache Config: %s', conf_dict)
-    # NOTE(yorik-sar): these arguments will be used for memcache-related
-    # backends. Use setdefault for url to support old-style setting through
-    # backend_argument=url:127.0.0.1:11211
-    conf_dict.setdefault('%s.arguments.url' % prefix,
-                         CONF.cache.memcache_servers)
-    for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
-                'pool_unused_timeout', 'pool_connection_get_timeout'):
-        value = getattr(CONF.cache, 'memcache_' + arg)
-        conf_dict['%s.arguments.%s' % (prefix, arg)] = value
-
-    return conf_dict
-
-
-def configure_cache_region(region):
-    """Configure a cache region.
-
-    :param region: optional CacheRegion object, if not provided a new region
-                   will be instantiated
-    :raises: exception.ValidationError
-    :returns: dogpile.cache.CacheRegion
-    """
-    if not isinstance(region, dogpile.cache.CacheRegion):
-        raise exception.ValidationError(
-            _('region not type dogpile.cache.CacheRegion'))
-
-    if not region.is_configured:
-        # NOTE(morganfainberg): this is how you tell if a region is configured.
-        # There is a request logged with dogpile.cache upstream to make this
-        # easier / less ugly.
-
-        config_dict = build_cache_config()
-        region.configure_from_config(config_dict,
-                                     '%s.' % CONF.cache.config_prefix)
-
-        if CONF.cache.debug_cache_backend:
-            region.wrap(DebugProxy)
-
-        # NOTE(morganfainberg): if the backend requests the use of a
-        # key_mangler, we should respect that key_mangler function. If a
-        # key_mangler is not defined by the backend, use the sha1_mangle_key
-        # mangler provided by dogpile.cache. This ensures we always use a fixed
-        # size cache-key.
-        if region.key_mangler is None:
-            region.key_mangler = util.sha1_mangle_key
-
-        for class_path in CONF.cache.proxies:
-            # NOTE(morganfainberg): if we have any proxy wrappers, we should
-            # ensure they are added to the cache region's backend. Since
-            # configure_from_config doesn't handle the wrap argument, we need
-            # to manually add the Proxies. For information on how the
-            # ProxyBackends work, see the dogpile.cache documents on
-            # "changing-backend-behavior"
-            cls = importutils.import_class(class_path)
-            LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
-            region.wrap(cls)
-
-    return region
-
-
-def get_should_cache_fn(section):
-    """Build a function that returns a config section's caching status.
-
-    For any given driver in keystone that has caching capabilities, a boolean
-    config option for that driver's section (e.g. ``token``) should exist and
-    default to ``True``. This function will use that value to tell the caching
-    decorator if caching for that driver is enabled. To properly use this
-    with the decorator, pass this function the configuration section and assign
-    the result to a variable. Pass the new variable to the caching decorator
-    as the named argument ``should_cache_fn``. e.g.::
-
-        from keystone.common import cache
-
-        SHOULD_CACHE = cache.get_should_cache_fn('token')
-
-        @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
-        def function(arg1, arg2):
-            ...
-
-    :param section: name of the configuration section to examine
-    :type section: string
-    :returns: function reference
-    """
-    def should_cache(value):
-        if not CONF.cache.enabled:
-            return False
-        conf_group = getattr(CONF, section)
-        return getattr(conf_group, 'caching', True)
-    return should_cache
-
-
-def get_expiration_time_fn(section):
-    """Build a function that returns a config section's expiration time status.
-
-    For any given driver in keystone that has caching capabilities, an int
-    config option called ``cache_time`` for that driver's section
-    (e.g. ``token``) should exist and typically default to ``None``. This
-    function will use that value to tell the caching decorator of the TTL
-    override for caching the resulting objects. If the value of the config
-    option is ``None`` the default value provided in the
-    ``[cache] expiration_time`` option will be used by the decorator. The
-    default may be set to something other than ``None`` in cases where the
-    caching TTL should not be tied to the global default(s) (e.g.
-    revocation_list changes very infrequently and can be cached for >1h by
-    default).
-
-    To properly use this with the decorator, pass this function the
-    configuration section and assign the result to a variable. Pass the new
-    variable to the caching decorator as the named argument
-    ``expiration_time``. e.g.::
-
-        from keystone.common import cache
-
-        EXPIRATION_TIME = cache.get_expiration_time_fn('token')
-
-        @cache.on_arguments(expiration_time=EXPIRATION_TIME)
-        def function(arg1, arg2):
-            ...
-
-    :param section: name of the configuration section to examine
-    :type section: string
-    :rtype: function reference
-    """
-    def get_expiration_time():
-        conf_group = getattr(CONF, section)
-        return getattr(conf_group, 'cache_time', None)
-    return get_expiration_time
-
-
-def key_generate_to_str(s):
-    # NOTE(morganfainberg): Since we need to stringify all arguments, attempt
-    # to stringify and handle the Unicode error explicitly as needed.
-    try:
-        return str(s)
-    except UnicodeEncodeError:
-        return s.encode('utf-8')
-
-
-def function_key_generator(namespace, fn, to_str=key_generate_to_str):
-    # NOTE(morganfainberg): This wraps dogpile.cache's default
-    # function_key_generator to change the default to_str mechanism.
-    return util.function_key_generator(namespace, fn, to_str=to_str)
-
-
-REGION = dogpile.cache.make_region(
-    function_key_generator=function_key_generator)
-on_arguments = REGION.cache_on_arguments
-
-
-def get_memoization_decorator(section, expiration_section=None):
-    """Build a function based on the `on_arguments` decorator for the section.
-
-    For any given driver in Keystone that has caching capabilities, a
-    pair of functions is required to properly determine the status of the
-    caching capabilities (a toggle to indicate caching is enabled and any
-    override of the default TTL for cached data). This function will return
-    an object that has the memoization decorator ``on_arguments``
-    pre-configured for the driver.
-
-    Example usage::
-
-        from keystone.common import cache
-
-        MEMOIZE = cache.get_memoization_decorator(section='token')
-
-        @MEMOIZE
-        def function(arg1, arg2):
-            ...
-
-
-        ALTERNATE_MEMOIZE = cache.get_memoization_decorator(
-            section='token', expiration_section='revoke')
-
-        @ALTERNATE_MEMOIZE
-        def function2(arg1, arg2):
-            ...
-
-    :param section: name of the configuration section to examine
-    :type section: string
-    :param expiration_section: name of the configuration section to examine
-                               for the expiration option. This will fall back
-                               to using ``section`` if the value is unspecified
-                               or ``None``
-    :type expiration_section: string
-    :rtype: function reference
-    """
-    if expiration_section is None:
-        expiration_section = section
-    should_cache = get_should_cache_fn(section)
-    expiration_time = get_expiration_time_fn(expiration_section)
-
-    memoize = REGION.cache_on_arguments(should_cache_fn=should_cache,
-                                        expiration_time=expiration_time)
-
-    # Make sure the actual "should_cache" and "expiration_time" methods are
-    # available. This is potentially interesting/useful to pre-seed cache
-    # values.
-    memoize.should_cache = should_cache
-    memoize.get_expiration_time = expiration_time
-
-    return memoize
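The helpers removed here (the debug proxy, the config-dict builder, the should-cache and expiration lookups, and the module-level REGION) were essentially ported into oslo.cache, which now takes the conf object and region explicitly. A minimal sketch of the equivalent call through the library; the group and expiration_group pairing is illustrative:

    from oslo_cache import core as cache
    from oslo_config import cfg

    CONF = cfg.CONF
    region = cache.create_region()
    cache.configure_cache_region(CONF, region)

    MEMOIZE = cache.get_memoization_decorator(CONF, region, 'token',
                                              expiration_group='revoke')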
@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from oslo_cache import core as cache
 from oslo_config import cfg
 import oslo_messaging
 import passlib.utils
@@ -306,82 +307,6 @@ FILE_OPTIONS = {
                    deprecated_opts=[cfg.DeprecatedOpt(
                        'revocation_cache_time', group='token')]),
     ],
-    'cache': [
-        cfg.StrOpt('config_prefix', default='cache.keystone',
-                   help='Prefix for building the configuration dictionary '
-                        'for the cache region. This should not need to be '
-                        'changed unless there is another dogpile.cache '
-                        'region with the same configuration name.'),
-        cfg.IntOpt('expiration_time', default=600,
-                   help='Default TTL, in seconds, for any cached item in '
-                        'the dogpile.cache region. This applies to any '
-                        'cached method that doesn\'t have an explicit '
-                        'cache expiration time defined for it.'),
-        # NOTE(morganfainberg): the dogpile.cache.memory acceptable in devstack
-        # and other such single-process/thread deployments. Running
-        # dogpile.cache.memory in any other configuration has the same pitfalls
-        # as the KVS token backend. It is recommended that either Redis or
-        # Memcached are used as the dogpile backend for real workloads. To
-        # prevent issues with the memory cache ending up in "production"
-        # unintentionally, we register a no-op as the keystone default caching
-        # backend.
-        cfg.StrOpt('backend', default='keystone.common.cache.noop',
-                   help='Dogpile.cache backend module. It is recommended '
-                        'that Memcache with pooling '
-                        '(keystone.cache.memcache_pool) or Redis '
-                        '(dogpile.cache.redis) be used in production '
-                        'deployments. Small workloads (single process) '
-                        'like devstack can use the dogpile.cache.memory '
-                        'backend.'),
-        cfg.MultiStrOpt('backend_argument', default=[], secret=True,
-                        help='Arguments supplied to the backend module. '
-                             'Specify this option once per argument to be '
-                             'passed to the dogpile.cache backend. Example '
-                             'format: "<argname>:<value>".'),
-        cfg.ListOpt('proxies', default=[],
-                    help='Proxy classes to import that will affect the way '
-                         'the dogpile.cache backend functions. See the '
-                         'dogpile.cache documentation on '
-                         'changing-backend-behavior.'),
-        cfg.BoolOpt('enabled', default=False,
-                    help='Global toggle for all caching using the '
-                         'should_cache_fn mechanism.'),
-        cfg.BoolOpt('debug_cache_backend', default=False,
-                    help='Extra debugging from the cache backend (cache '
-                         'keys, get/set/delete/etc calls). This is only '
-                         'really useful if you need to see the specific '
-                         'cache-backend get/set/delete calls with the '
-                         'keys/values. Typically this should be left set '
-                         'to false.'),
-        cfg.ListOpt('memcache_servers', default=['localhost:11211'],
-                    help='Memcache servers in the format of "host:port".'
-                         ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
-                         ' backends only).'),
-        cfg.IntOpt('memcache_dead_retry',
-                   default=5 * 60,
-                   help='Number of seconds memcached server is considered dead'
-                        ' before it is tried again. (dogpile.cache.memcache and'
-                        ' keystone.cache.memcache_pool backends only).'),
-        cfg.IntOpt('memcache_socket_timeout',
-                   default=3,
-                   help='Timeout in seconds for every call to a server.'
-                        ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
-                        ' backends only).'),
-        cfg.IntOpt('memcache_pool_maxsize',
-                   default=10,
-                   help='Max total number of open connections to every'
-                        ' memcached server. (keystone.cache.memcache_pool backend'
-                        ' only).'),
-        cfg.IntOpt('memcache_pool_unused_timeout',
-                   default=60,
-                   help='Number of seconds a connection to memcached is held'
-                        ' unused in the pool before it is closed.'
-                        ' (keystone.cache.memcache_pool backend only).'),
-        cfg.IntOpt('memcache_pool_connection_get_timeout',
-                   default=10,
-                   help='Number of seconds that an operation will wait to get '
-                        'a memcache client connection.'),
-    ],
     'ssl': [
         cfg.StrOpt('ca_key',
                    default='/etc/keystone/ssl/private/cakey.pem',
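The whole [cache] option block disappears from keystone's config module because oslo.cache registers equivalent options itself once cache.configure(conf) is called, which the next hunk adds. A quick way to confirm the knobs are still available, assuming the option names carried over unchanged from this deleted block:

    from oslo_cache import core as cache
    from oslo_config import cfg

    conf = cfg.ConfigOpts()
    conf([])                   # parse with no CLI args or config files
    cache.configure(conf)      # registers the [cache] group options
    print(conf.cache.backend, conf.cache.expiration_time,
          conf.cache.memcache_servers)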
@@ -1212,6 +1137,8 @@ def configure(conf=None):

     # register any non-default auth methods here (used by extensions, etc)
     setup_authentication(conf)
+    # add oslo.cache related config options
+    cache.configure(conf)


 def list_opts():
@@ -21,11 +21,11 @@ import time

 from dogpile.cache import api
 from dogpile.cache.backends import memcached
+from oslo_cache.backends import memcache_pool
 from oslo_config import cfg
 from oslo_log import log
 from six.moves import range

-from keystone.common.cache.backends import memcache_pool
 from keystone import exception
 from keystone.i18n import _

@ -54,7 +54,7 @@ EXTENSION_DATA = {
|
|||||||
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
|
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
|
||||||
extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
|
extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
|
||||||
|
|
||||||
MEMOIZE = cache.get_memoization_decorator(section='revoke')
|
MEMOIZE = cache.get_memoization_decorator(group='revoke')
|
||||||
|
|
||||||
|
|
||||||
def revoked_before_cutoff_time():
|
def revoked_before_cutoff_time():
|
||||||
|
@@ -39,7 +39,7 @@ CONF = cfg.CONF

 LOG = log.getLogger(__name__)

-MEMOIZE = cache.get_memoization_decorator(section='identity')
+MEMOIZE = cache.get_memoization_decorator(group='identity')

 DOMAIN_CONF_FHEAD = 'keystone.'
 DOMAIN_CONF_FTAIL = '.conf'
@@ -30,7 +30,7 @@ from keystone import notifications

 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='resource')
+MEMOIZE = cache.get_memoization_decorator(group='resource')


 def calc_default_domain():
@@ -810,7 +810,7 @@ class ResourceDriverV8(object):
 Driver = manager.create_legacy_driver(ResourceDriverV8)


-MEMOIZE_CONFIG = cache.get_memoization_decorator(section='domain_config')
+MEMOIZE_CONFIG = cache.get_memoization_decorator(group='domain_config')


 @dependency.provider('domain_config_api')
@@ -30,7 +30,7 @@ from keystone import trust
 def load_backends():

     # Configure and build the cache
-    cache.configure_cache_region(cache.REGION)
+    cache.configure_cache()

     # Ensure that the identity driver is created before the assignment manager
     # and that the assignment driver is created before the resource manager.
@@ -113,7 +113,7 @@ class BaseBackendLdapIdentitySqlEverythingElse(unit.SQLDriverOverrides):
         super(BaseBackendLdapIdentitySqlEverythingElse, self).setUp()
         self.clear_database()
         self.load_backends()
-        cache.configure_cache_region(cache.REGION)
+        cache.configure_cache()

         sqldb.recreate()
         self.load_fixtures(default_fixtures)
@ -1,135 +0,0 @@
|
|||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import threading
import time

import mock
import six
from six.moves import queue
import testtools
from testtools import matchers

from keystone.common.cache import _memcache_pool
from keystone import exception
from keystone.tests.unit import core


class _TestConnectionPool(_memcache_pool.ConnectionPool):
    destroyed_value = 'destroyed'

    def _create_connection(self):
        return mock.MagicMock()

    def _destroy_connection(self, conn):
        conn(self.destroyed_value)


class TestConnectionPool(core.TestCase):
    def setUp(self):
        super(TestConnectionPool, self).setUp()
        self.unused_timeout = 10
        self.maxsize = 2
        self.connection_pool = _TestConnectionPool(
            maxsize=self.maxsize,
            unused_timeout=self.unused_timeout)
        self.addCleanup(self.cleanup_instance('connection_pool'))

    def test_get_context_manager(self):
        self.assertThat(self.connection_pool.queue, matchers.HasLength(0))
        with self.connection_pool.acquire() as conn:
            self.assertEqual(1, self.connection_pool._acquired)
        self.assertEqual(0, self.connection_pool._acquired)
        self.assertThat(self.connection_pool.queue, matchers.HasLength(1))
        self.assertEqual(conn, self.connection_pool.queue[0].connection)

    def test_cleanup_pool(self):
        self.test_get_context_manager()
        newtime = time.time() + self.unused_timeout * 2
        non_expired_connection = _memcache_pool._PoolItem(
            ttl=(newtime * 2),
            connection=mock.MagicMock())
        self.connection_pool.queue.append(non_expired_connection)
        self.assertThat(self.connection_pool.queue, matchers.HasLength(2))
        with mock.patch.object(time, 'time', return_value=newtime):
            conn = self.connection_pool.queue[0].connection
            with self.connection_pool.acquire():
                pass
            conn.assert_has_calls(
                [mock.call(self.connection_pool.destroyed_value)])
        self.assertThat(self.connection_pool.queue, matchers.HasLength(1))
        self.assertEqual(0, non_expired_connection.connection.call_count)

    def test_acquire_conn_exception_returns_acquired_count(self):
        class TestException(Exception):
            pass

        with mock.patch.object(_TestConnectionPool, '_create_connection',
                               side_effect=TestException):
            with testtools.ExpectedException(TestException):
                with self.connection_pool.acquire():
                    pass
            self.assertThat(self.connection_pool.queue,
                            matchers.HasLength(0))
            self.assertEqual(0, self.connection_pool._acquired)

    def test_connection_pool_limits_maximum_connections(self):
        # NOTE(morganfainberg): To ensure we don't lockup tests until the
        # job limit, explicitly call .get_nowait() and .put_nowait() in this
        # case.
        conn1 = self.connection_pool.get_nowait()
        conn2 = self.connection_pool.get_nowait()

        # Use a nowait version to raise an Empty exception indicating we would
        # not get another connection until one is placed back into the queue.
        self.assertRaises(queue.Empty, self.connection_pool.get_nowait)

        # Place the connections back into the pool.
        self.connection_pool.put_nowait(conn1)
        self.connection_pool.put_nowait(conn2)

        # Make sure we can get a connection out of the pool again.
        self.connection_pool.get_nowait()

    def test_connection_pool_maximum_connection_get_timeout(self):
        connection_pool = _TestConnectionPool(
            maxsize=1,
            unused_timeout=self.unused_timeout,
            conn_get_timeout=0)

        def _acquire_connection():
            with connection_pool.acquire():
                pass

        # Make sure we've consumed the only available connection from the pool
        conn = connection_pool.get_nowait()

        self.assertRaises(exception.UnexpectedError, _acquire_connection)

        # Put the connection back and ensure we can acquire the connection
        # after it is available.
        connection_pool.put_nowait(conn)
        _acquire_connection()


class TestMemcacheClientOverrides(core.BaseTestCase):

    def test_client_stripped_of_threading_local(self):
        """threading.local overrides are restored for _MemcacheClient"""
        client_class = _memcache_pool._MemcacheClient
        # get the genuine thread._local from MRO
        thread_local = client_class.__mro__[2]
        self.assertTrue(thread_local is threading.local)
        for field in six.iterkeys(thread_local.__dict__):
            if field not in ('__dict__', '__weakref__'):
                self.assertNotEqual(id(getattr(thread_local, field, None)),
                                    id(getattr(client_class, field, None)))
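For readers skimming the tests above, the pool contract they exercise can be summarized in a short sketch. This is illustrative only: it assumes keystone.common.cache._memcache_pool is importable exactly as in the tests, and FakeConnectionPool is a made-up subclass, not part of the tree.

# Illustrative sketch of the ConnectionPool contract exercised above;
# FakeConnectionPool and its connection object are made up for this example.
from keystone.common.cache import _memcache_pool


class FakeConnectionPool(_memcache_pool.ConnectionPool):
    def _create_connection(self):
        # The real backend would return a memcache client here.
        return object()

    def _destroy_connection(self, conn):
        # Nothing to release for the stand-in connection object.
        pass


pool = FakeConnectionPool(maxsize=2, unused_timeout=10)
with pool.acquire() as conn:
    # The connection is checked out here and handed back to the pool when
    # the block exits, even on exception (see the tests above).
    pass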
@@ -411,7 +411,7 @@ class TestCase(BaseTestCase):
             group='cache',
             backend='dogpile.cache.memory',
             enabled=True,
-            proxies=['keystone.tests.unit.test_cache.CacheIsolatingProxy'])
+            proxies=['oslo_cache.testing.CacheIsolatingProxy'])
         self.config_fixture.config(
             group='catalog',
             driver='templated',
@@ -29,8 +29,8 @@ class Cache(fixtures.Fixture):

         # NOTE(morganfainberg): The only way to reconfigure the CacheRegion
         # object on each setUp() call is to remove the .backend property.
-        if cache.REGION.is_configured:
-            del cache.REGION.backend
+        if cache.CACHE_REGION.is_configured:
+            del cache.CACHE_REGION.backend

         # ensure the cache region instance is setup
-        cache.configure_cache_region(cache.REGION)
+        cache.configure_cache()
@@ -2311,7 +2311,7 @@ class LdapIdentitySqlAssignment(BaseLDAPIdentity, unit.SQLDriverOverrides,
         super(LdapIdentitySqlAssignment, self).setUp()
         self.ldapdb.clear()
         self.load_backends()
-        cache.configure_cache_region(cache.REGION)
+        cache.configure_cache()

         sqldb.recreate()
         self.load_fixtures(default_fixtures)
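The hunks above all follow one migration pattern: the module-level region becomes cache.CACHE_REGION and is configured through the argument-free cache.configure_cache(). A minimal sketch of the new call sequence, assuming only the names visible in these hunks (the key and value are illustrative):

# Sketch only: assumes keystone.common.cache exposes configure_cache() and
# CACHE_REGION as the hunks above show; the key and value are illustrative.
from keystone.common import cache

cache.configure_cache()  # set up the shared region, as the fixtures above do
cache.CACHE_REGION.set('example-key', 'example-value')
value = cache.CACHE_REGION.get('example-key')  # NO_VALUE if the backend does not store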
@ -1,324 +0,0 @@
|
|||||||
# Copyright 2013 Metacloud
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import copy
|
|
||||||
import time
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from dogpile.cache import api
|
|
||||||
from dogpile.cache import proxy
|
|
||||||
import mock
|
|
||||||
from oslo_config import cfg
|
|
||||||
|
|
||||||
from keystone.common import cache
|
|
||||||
from keystone import exception
|
|
||||||
from keystone.tests import unit
|
|
||||||
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
NO_VALUE = api.NO_VALUE
|
|
||||||
|
|
||||||
|
|
||||||
def _copy_value(value):
|
|
||||||
if value is not NO_VALUE:
|
|
||||||
value = copy.deepcopy(value)
|
|
||||||
return value
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE(morganfainberg): WARNING - It is not recommended to use the Memory
|
|
||||||
# backend for dogpile.cache in a real deployment under any circumstances. The
|
|
||||||
# backend does no cleanup of expired values and therefore will leak memory. The
|
|
||||||
# backend is not implemented in a way to share data across processes (e.g.
|
|
||||||
# Keystone in HTTPD. This proxy is a hack to get around the lack of isolation
|
|
||||||
# of values in memory. Currently it blindly stores and retrieves the values
|
|
||||||
# from the cache, and modifications to dicts/lists/etc returned can result in
|
|
||||||
# changes to the cached values. In short, do not use the dogpile.cache.memory
|
|
||||||
# backend unless you are running tests or expecting odd/strange results.
|
|
||||||
class CacheIsolatingProxy(proxy.ProxyBackend):
|
|
||||||
"""Proxy that forces a memory copy of stored values.
|
|
||||||
|
|
||||||
The default in-memory cache-region does not perform a copy on values it is
|
|
||||||
meant to cache. Therefore if the value is modified after set or after get,
|
|
||||||
the cached value also is modified. This proxy does a copy as the last
|
|
||||||
thing before storing data.
|
|
||||||
|
|
||||||
"""
|
|
||||||
def get(self, key):
|
|
||||||
return _copy_value(self.proxied.get(key))
|
|
||||||
|
|
||||||
def set(self, key, value):
|
|
||||||
self.proxied.set(key, _copy_value(value))
|
|
||||||
|
|
||||||
|
|
||||||
class TestProxy(proxy.ProxyBackend):
|
|
||||||
def get(self, key):
|
|
||||||
value = _copy_value(self.proxied.get(key))
|
|
||||||
if value is not NO_VALUE:
|
|
||||||
if isinstance(value[0], TestProxyValue):
|
|
||||||
value[0].cached = True
|
|
||||||
return value
|
|
||||||
|
|
||||||
|
|
||||||
class TestProxyValue(object):
|
|
||||||
def __init__(self, value):
|
|
||||||
self.value = value
|
|
||||||
self.cached = False
|
|
||||||
|
|
||||||
|
|
||||||
class CacheRegionTest(unit.TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(CacheRegionTest, self).setUp()
|
|
||||||
self.region = cache.make_region()
|
|
||||||
cache.configure_cache_region(self.region)
|
|
||||||
self.region.wrap(TestProxy)
|
|
||||||
self.test_value = TestProxyValue('Decorator Test')
|
|
||||||
|
|
||||||
def _add_test_caching_option(self):
|
|
||||||
self.config_fixture.register_opt(
|
|
||||||
cfg.BoolOpt('caching', default=True), group='cache')
|
|
||||||
|
|
||||||
def _get_cacheable_function(self):
|
|
||||||
with mock.patch.object(cache.REGION, 'cache_on_arguments',
|
|
||||||
self.region.cache_on_arguments):
|
|
||||||
memoize = cache.get_memoization_decorator(section='cache')
|
|
||||||
|
|
||||||
@memoize
|
|
||||||
def cacheable_function(value):
|
|
||||||
return value
|
|
||||||
|
|
||||||
return cacheable_function
|
|
||||||
|
|
||||||
def test_region_built_with_proxy_direct_cache_test(self):
|
|
||||||
# Verify cache regions are properly built with proxies.
|
|
||||||
test_value = TestProxyValue('Direct Cache Test')
|
|
||||||
self.region.set('cache_test', test_value)
|
|
||||||
cached_value = self.region.get('cache_test')
|
|
||||||
self.assertTrue(cached_value.cached)
|
|
||||||
|
|
||||||
def test_cache_region_no_error_multiple_config(self):
|
|
||||||
# Verify configuring the CacheRegion again doesn't error.
|
|
||||||
cache.configure_cache_region(self.region)
|
|
||||||
cache.configure_cache_region(self.region)
|
|
||||||
|
|
||||||
def _get_cache_fallthrough_fn(self, cache_time):
|
|
||||||
with mock.patch.object(cache.REGION, 'cache_on_arguments',
|
|
||||||
self.region.cache_on_arguments):
|
|
||||||
memoize = cache.get_memoization_decorator(
|
|
||||||
section='cache',
|
|
||||||
expiration_section='assignment')
|
|
||||||
|
|
||||||
class _test_obj(object):
|
|
||||||
def __init__(self, value):
|
|
||||||
self.test_value = value
|
|
||||||
|
|
||||||
@memoize
|
|
||||||
def get_test_value(self):
|
|
||||||
return self.test_value
|
|
||||||
|
|
||||||
def _do_test(value):
|
|
||||||
|
|
||||||
test_obj = _test_obj(value)
|
|
||||||
|
|
||||||
# Ensure the value has been cached
|
|
||||||
test_obj.get_test_value()
|
|
||||||
# Get the now cached value
|
|
||||||
cached_value = test_obj.get_test_value()
|
|
||||||
self.assertTrue(cached_value.cached)
|
|
||||||
self.assertEqual(value.value, cached_value.value)
|
|
||||||
self.assertEqual(cached_value.value, test_obj.test_value.value)
|
|
||||||
# Change the underlying value on the test object.
|
|
||||||
test_obj.test_value = TestProxyValue(uuid.uuid4().hex)
|
|
||||||
self.assertEqual(cached_value.value,
|
|
||||||
test_obj.get_test_value().value)
|
|
||||||
# override the system time to ensure the non-cached new value
|
|
||||||
# is returned
|
|
||||||
new_time = time.time() + (cache_time * 2)
|
|
||||||
with mock.patch.object(time, 'time',
|
|
||||||
return_value=new_time):
|
|
||||||
overriden_cache_value = test_obj.get_test_value()
|
|
||||||
self.assertNotEqual(cached_value.value,
|
|
||||||
overriden_cache_value.value)
|
|
||||||
self.assertEqual(test_obj.test_value.value,
|
|
||||||
overriden_cache_value.value)
|
|
||||||
|
|
||||||
return _do_test
|
|
||||||
|
|
||||||
def test_cache_no_fallthrough_expiration_time_fn(self):
|
|
||||||
# Since we do not re-configure the cache region, for ease of testing
|
|
||||||
# this value is set the same as the expiration_time default in the
|
|
||||||
# [cache] section
|
|
||||||
cache_time = 600
|
|
||||||
expiration_time = cache.get_expiration_time_fn('role')
|
|
||||||
do_test = self._get_cache_fallthrough_fn(cache_time)
|
|
||||||
# Run the test with the assignment cache_time value
|
|
||||||
self.config_fixture.config(cache_time=cache_time,
|
|
||||||
group='role')
|
|
||||||
test_value = TestProxyValue(uuid.uuid4().hex)
|
|
||||||
self.assertEqual(cache_time, expiration_time())
|
|
||||||
do_test(value=test_value)
|
|
||||||
|
|
||||||
def test_cache_fallthrough_expiration_time_fn(self):
|
|
||||||
# Since we do not re-configure the cache region, for ease of testing
|
|
||||||
# this value is set the same as the expiration_time default in the
|
|
||||||
# [cache] section
|
|
||||||
cache_time = 599
|
|
||||||
expiration_time = cache.get_expiration_time_fn('role')
|
|
||||||
do_test = self._get_cache_fallthrough_fn(cache_time)
|
|
||||||
# Run the test with the assignment cache_time value set to None and
|
|
||||||
# the global value set.
|
|
||||||
self.config_fixture.config(cache_time=None, group='role')
|
|
||||||
test_value = TestProxyValue(uuid.uuid4().hex)
|
|
||||||
self.assertIsNone(expiration_time())
|
|
||||||
do_test(value=test_value)
|
|
||||||
|
|
||||||
def test_should_cache_fn_global_cache_enabled(self):
|
|
||||||
# Verify should_cache_fn generates a sane function for subsystem and
|
|
||||||
# functions as expected with caching globally enabled.
|
|
||||||
cacheable_function = self._get_cacheable_function()
|
|
||||||
|
|
||||||
self.config_fixture.config(group='cache', enabled=True)
|
|
||||||
cacheable_function(self.test_value)
|
|
||||||
cached_value = cacheable_function(self.test_value)
|
|
||||||
self.assertTrue(cached_value.cached)
|
|
||||||
|
|
||||||
def test_should_cache_fn_global_cache_disabled(self):
|
|
||||||
# Verify should_cache_fn generates a sane function for subsystem and
|
|
||||||
# functions as expected with caching globally disabled.
|
|
||||||
cacheable_function = self._get_cacheable_function()
|
|
||||||
|
|
||||||
self.config_fixture.config(group='cache', enabled=False)
|
|
||||||
cacheable_function(self.test_value)
|
|
||||||
cached_value = cacheable_function(self.test_value)
|
|
||||||
self.assertFalse(cached_value.cached)
|
|
||||||
|
|
||||||
def test_should_cache_fn_global_cache_disabled_section_cache_enabled(self):
|
|
||||||
# Verify should_cache_fn generates a sane function for subsystem and
|
|
||||||
# functions as expected with caching globally disabled and the specific
|
|
||||||
# section caching enabled.
|
|
||||||
cacheable_function = self._get_cacheable_function()
|
|
||||||
|
|
||||||
self._add_test_caching_option()
|
|
||||||
self.config_fixture.config(group='cache', enabled=False)
|
|
||||||
self.config_fixture.config(group='cache', caching=True)
|
|
||||||
|
|
||||||
cacheable_function(self.test_value)
|
|
||||||
cached_value = cacheable_function(self.test_value)
|
|
||||||
self.assertFalse(cached_value.cached)
|
|
||||||
|
|
||||||
def test_should_cache_fn_global_cache_enabled_section_cache_disabled(self):
|
|
||||||
# Verify should_cache_fn generates a sane function for subsystem and
|
|
||||||
# functions as expected with caching globally enabled and the specific
|
|
||||||
# section caching disabled.
|
|
||||||
cacheable_function = self._get_cacheable_function()
|
|
||||||
|
|
||||||
self._add_test_caching_option()
|
|
||||||
self.config_fixture.config(group='cache', enabled=True)
|
|
||||||
self.config_fixture.config(group='cache', caching=False)
|
|
||||||
|
|
||||||
cacheable_function(self.test_value)
|
|
||||||
cached_value = cacheable_function(self.test_value)
|
|
||||||
self.assertFalse(cached_value.cached)
|
|
||||||
|
|
||||||
def test_should_cache_fn_global_cache_enabled_section_cache_enabled(self):
|
|
||||||
# Verify should_cache_fn generates a sane function for subsystem and
|
|
||||||
# functions as expected with caching globally enabled and the specific
|
|
||||||
# section caching enabled.
|
|
||||||
cacheable_function = self._get_cacheable_function()
|
|
||||||
|
|
||||||
self._add_test_caching_option()
|
|
||||||
self.config_fixture.config(group='cache', enabled=True)
|
|
||||||
self.config_fixture.config(group='cache', caching=True)
|
|
||||||
|
|
||||||
cacheable_function(self.test_value)
|
|
||||||
cached_value = cacheable_function(self.test_value)
|
|
||||||
self.assertTrue(cached_value.cached)
|
|
||||||
|
|
||||||
def test_cache_dictionary_config_builder(self):
|
|
||||||
"""Validate we build a sane dogpile.cache dictionary config."""
|
|
||||||
self.config_fixture.config(group='cache',
|
|
||||||
config_prefix='test_prefix',
|
|
||||||
backend='some_test_backend',
|
|
||||||
expiration_time=86400,
|
|
||||||
backend_argument=['arg1:test',
|
|
||||||
'arg2:test:test',
|
|
||||||
'arg3.invalid'])
|
|
||||||
|
|
||||||
config_dict = cache.build_cache_config()
|
|
||||||
self.assertEqual(
|
|
||||||
CONF.cache.backend, config_dict['test_prefix.backend'])
|
|
||||||
self.assertEqual(
|
|
||||||
CONF.cache.expiration_time,
|
|
||||||
config_dict['test_prefix.expiration_time'])
|
|
||||||
self.assertEqual('test', config_dict['test_prefix.arguments.arg1'])
|
|
||||||
self.assertEqual('test:test',
|
|
||||||
config_dict['test_prefix.arguments.arg2'])
|
|
||||||
self.assertNotIn('test_prefix.arguments.arg3', config_dict)
|
|
||||||
|
|
||||||
def test_cache_debug_proxy(self):
|
|
||||||
single_value = 'Test Value'
|
|
||||||
single_key = 'testkey'
|
|
||||||
multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
|
|
||||||
|
|
||||||
self.region.set(single_key, single_value)
|
|
||||||
self.assertEqual(single_value, self.region.get(single_key))
|
|
||||||
|
|
||||||
self.region.delete(single_key)
|
|
||||||
self.assertEqual(NO_VALUE, self.region.get(single_key))
|
|
||||||
|
|
||||||
self.region.set_multi(multi_values)
|
|
||||||
cached_values = self.region.get_multi(multi_values.keys())
|
|
||||||
for value in multi_values.values():
|
|
||||||
self.assertIn(value, cached_values)
|
|
||||||
self.assertEqual(len(multi_values.values()), len(cached_values))
|
|
||||||
|
|
||||||
self.region.delete_multi(multi_values.keys())
|
|
||||||
for value in self.region.get_multi(multi_values.keys()):
|
|
||||||
self.assertEqual(NO_VALUE, value)
|
|
||||||
|
|
||||||
def test_configure_non_region_object_raises_error(self):
|
|
||||||
self.assertRaises(exception.ValidationError,
|
|
||||||
cache.configure_cache_region,
|
|
||||||
"bogus")
|
|
||||||
|
|
||||||
|
|
||||||
class CacheNoopBackendTest(unit.TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(CacheNoopBackendTest, self).setUp()
|
|
||||||
self.region = cache.make_region()
|
|
||||||
cache.configure_cache_region(self.region)
|
|
||||||
|
|
||||||
def config_overrides(self):
|
|
||||||
super(CacheNoopBackendTest, self).config_overrides()
|
|
||||||
self.config_fixture.config(group='cache',
|
|
||||||
backend='keystone.common.cache.noop')
|
|
||||||
|
|
||||||
def test_noop_backend(self):
|
|
||||||
single_value = 'Test Value'
|
|
||||||
single_key = 'testkey'
|
|
||||||
multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
|
|
||||||
|
|
||||||
self.region.set(single_key, single_value)
|
|
||||||
self.assertEqual(NO_VALUE, self.region.get(single_key))
|
|
||||||
|
|
||||||
self.region.set_multi(multi_values)
|
|
||||||
cached_values = self.region.get_multi(multi_values.keys())
|
|
||||||
self.assertEqual(len(cached_values), len(multi_values.values()))
|
|
||||||
for value in cached_values:
|
|
||||||
self.assertEqual(NO_VALUE, value)
|
|
||||||
|
|
||||||
# Delete should not raise exceptions
|
|
||||||
self.region.delete(single_key)
|
|
||||||
self.region.delete_multi(multi_values.keys())
|
|
@ -1,728 +0,0 @@
|
|||||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import collections
|
|
||||||
import copy
|
|
||||||
import functools
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from dogpile.cache import api
|
|
||||||
from dogpile.cache import region as dp_region
|
|
||||||
import six
|
|
||||||
from six.moves import range
|
|
||||||
|
|
||||||
from keystone.common.cache.backends import mongo
|
|
||||||
from keystone import exception
|
|
||||||
from keystone.tests import unit
|
|
||||||
|
|
||||||
|
|
||||||
# Mock database structure sample where 'ks_cache' is database and
|
|
||||||
# 'cache' is collection. Dogpile CachedValue data is divided in two
|
|
||||||
# fields `value` (CachedValue.payload) and `meta` (CachedValue.metadata)
|
|
||||||
ks_cache = {
|
|
||||||
"cache": [
|
|
||||||
{
|
|
||||||
"value": {
|
|
||||||
"serviceType": "identity",
|
|
||||||
"allVersionsUrl": "https://dummyUrl",
|
|
||||||
"dateLastModified": "ISODDate(2014-02-08T18:39:13.237Z)",
|
|
||||||
"serviceName": "Identity",
|
|
||||||
"enabled": "True"
|
|
||||||
},
|
|
||||||
"meta": {
|
|
||||||
"v": 1,
|
|
||||||
"ct": 1392371422.015121
|
|
||||||
},
|
|
||||||
"doc_date": "ISODate('2014-02-14T09:50:22.015Z')",
|
|
||||||
"_id": "8251dc95f63842719c077072f1047ddf"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"value": "dummyValueX",
|
|
||||||
"meta": {
|
|
||||||
"v": 1,
|
|
||||||
"ct": 1392371422.014058
|
|
||||||
},
|
|
||||||
"doc_date": "ISODate('2014-02-14T09:50:22.014Z')",
|
|
||||||
"_id": "66730b9534d146f0804d23729ad35436"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
COLLECTIONS = {}
|
|
||||||
SON_MANIPULATOR = None
|
|
||||||
|
|
||||||
|
|
||||||
class MockCursor(object):
|
|
||||||
|
|
||||||
def __init__(self, collection, dataset_factory):
|
|
||||||
super(MockCursor, self).__init__()
|
|
||||||
self.collection = collection
|
|
||||||
self._factory = dataset_factory
|
|
||||||
self._dataset = self._factory()
|
|
||||||
self._limit = None
|
|
||||||
self._skip = None
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
return self
|
|
||||||
|
|
||||||
def __next__(self):
|
|
||||||
if self._skip:
|
|
||||||
for _ in range(self._skip):
|
|
||||||
next(self._dataset)
|
|
||||||
self._skip = None
|
|
||||||
if self._limit is not None and self._limit <= 0:
|
|
||||||
raise StopIteration()
|
|
||||||
if self._limit is not None:
|
|
||||||
self._limit -= 1
|
|
||||||
return next(self._dataset)
|
|
||||||
|
|
||||||
next = __next__
|
|
||||||
|
|
||||||
def __getitem__(self, index):
|
|
||||||
arr = [x for x in self._dataset]
|
|
||||||
self._dataset = iter(arr)
|
|
||||||
return arr[index]
|
|
||||||
|
|
||||||
|
|
||||||
class MockCollection(object):
|
|
||||||
|
|
||||||
def __init__(self, db, name):
|
|
||||||
super(MockCollection, self).__init__()
|
|
||||||
self.name = name
|
|
||||||
self._collection_database = db
|
|
||||||
self._documents = {}
|
|
||||||
self.write_concern = {}
|
|
||||||
|
|
||||||
def __getattr__(self, name):
|
|
||||||
if name == 'database':
|
|
||||||
return self._collection_database
|
|
||||||
|
|
||||||
def ensure_index(self, key_or_list, *args, **kwargs):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def index_information(self):
|
|
||||||
return {}
|
|
||||||
|
|
||||||
def find_one(self, spec_or_id=None, *args, **kwargs):
|
|
||||||
if spec_or_id is None:
|
|
||||||
spec_or_id = {}
|
|
||||||
if not isinstance(spec_or_id, collections.Mapping):
|
|
||||||
spec_or_id = {'_id': spec_or_id}
|
|
||||||
|
|
||||||
try:
|
|
||||||
return next(self.find(spec_or_id, *args, **kwargs))
|
|
||||||
except StopIteration:
|
|
||||||
return None
|
|
||||||
|
|
||||||
def find(self, spec=None, *args, **kwargs):
|
|
||||||
return MockCursor(self, functools.partial(self._get_dataset, spec))
|
|
||||||
|
|
||||||
def _get_dataset(self, spec):
|
|
||||||
dataset = (self._copy_doc(document, dict) for document in
|
|
||||||
self._iter_documents(spec))
|
|
||||||
return dataset
|
|
||||||
|
|
||||||
def _iter_documents(self, spec=None):
|
|
||||||
return (SON_MANIPULATOR.transform_outgoing(document, self) for
|
|
||||||
document in six.itervalues(self._documents)
|
|
||||||
if self._apply_filter(document, spec))
|
|
||||||
|
|
||||||
def _apply_filter(self, document, query):
|
|
||||||
for key, search in query.items():
|
|
||||||
doc_val = document.get(key)
|
|
||||||
if isinstance(search, dict):
|
|
||||||
op_dict = {'$in': lambda dv, sv: dv in sv}
|
|
||||||
is_match = all(
|
|
||||||
op_str in op_dict and op_dict[op_str](doc_val, search_val)
|
|
||||||
for op_str, search_val in search.items()
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
is_match = doc_val == search
|
|
||||||
|
|
||||||
return is_match
|
|
||||||
|
|
||||||
def _copy_doc(self, obj, container):
|
|
||||||
if isinstance(obj, list):
|
|
||||||
new = []
|
|
||||||
for item in obj:
|
|
||||||
new.append(self._copy_doc(item, container))
|
|
||||||
return new
|
|
||||||
if isinstance(obj, dict):
|
|
||||||
new = container()
|
|
||||||
for key, value in list(obj.items()):
|
|
||||||
new[key] = self._copy_doc(value, container)
|
|
||||||
return new
|
|
||||||
else:
|
|
||||||
return copy.copy(obj)
|
|
||||||
|
|
||||||
def insert(self, data, manipulate=True, **kwargs):
|
|
||||||
if isinstance(data, list):
|
|
||||||
return [self._insert(element) for element in data]
|
|
||||||
return self._insert(data)
|
|
||||||
|
|
||||||
def save(self, data, manipulate=True, **kwargs):
|
|
||||||
return self._insert(data)
|
|
||||||
|
|
||||||
def _insert(self, data):
|
|
||||||
if '_id' not in data:
|
|
||||||
data['_id'] = uuid.uuid4().hex
|
|
||||||
object_id = data['_id']
|
|
||||||
self._documents[object_id] = self._internalize_dict(data)
|
|
||||||
return object_id
|
|
||||||
|
|
||||||
def find_and_modify(self, spec, document, upsert=False, **kwargs):
|
|
||||||
self.update(spec, document, upsert, **kwargs)
|
|
||||||
|
|
||||||
def update(self, spec, document, upsert=False, **kwargs):
|
|
||||||
|
|
||||||
existing_docs = [doc for doc in six.itervalues(self._documents)
|
|
||||||
if self._apply_filter(doc, spec)]
|
|
||||||
if existing_docs:
|
|
||||||
existing_doc = existing_docs[0] # should find only 1 match
|
|
||||||
_id = existing_doc['_id']
|
|
||||||
existing_doc.clear()
|
|
||||||
existing_doc['_id'] = _id
|
|
||||||
existing_doc.update(self._internalize_dict(document))
|
|
||||||
elif upsert:
|
|
||||||
existing_doc = self._documents[self._insert(document)]
|
|
||||||
|
|
||||||
def _internalize_dict(self, d):
|
|
||||||
return {k: copy.deepcopy(v) for k, v in d.items()}
|
|
||||||
|
|
||||||
def remove(self, spec_or_id=None, search_filter=None):
|
|
||||||
"""Remove objects matching spec_or_id from the collection."""
|
|
||||||
if spec_or_id is None:
|
|
||||||
spec_or_id = search_filter if search_filter else {}
|
|
||||||
if not isinstance(spec_or_id, dict):
|
|
||||||
spec_or_id = {'_id': spec_or_id}
|
|
||||||
to_delete = list(self.find(spec=spec_or_id))
|
|
||||||
for doc in to_delete:
|
|
||||||
doc_id = doc['_id']
|
|
||||||
del self._documents[doc_id]
|
|
||||||
|
|
||||||
return {
|
|
||||||
"connectionId": uuid.uuid4().hex,
|
|
||||||
"n": len(to_delete),
|
|
||||||
"ok": 1.0,
|
|
||||||
"err": None,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class MockMongoDB(object):
|
|
||||||
def __init__(self, dbname):
|
|
||||||
self._dbname = dbname
|
|
||||||
self.mainpulator = None
|
|
||||||
|
|
||||||
def authenticate(self, username, password):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def add_son_manipulator(self, manipulator):
|
|
||||||
global SON_MANIPULATOR
|
|
||||||
SON_MANIPULATOR = manipulator
|
|
||||||
|
|
||||||
def __getattr__(self, name):
|
|
||||||
if name == 'authenticate':
|
|
||||||
return self.authenticate
|
|
||||||
elif name == 'name':
|
|
||||||
return self._dbname
|
|
||||||
elif name == 'add_son_manipulator':
|
|
||||||
return self.add_son_manipulator
|
|
||||||
else:
|
|
||||||
return get_collection(self._dbname, name)
|
|
||||||
|
|
||||||
def __getitem__(self, name):
|
|
||||||
return get_collection(self._dbname, name)
|
|
||||||
|
|
||||||
|
|
||||||
class MockMongoClient(object):
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def __getattr__(self, dbname):
|
|
||||||
return MockMongoDB(dbname)
|
|
||||||
|
|
||||||
|
|
||||||
def get_collection(db_name, collection_name):
|
|
||||||
mongo_collection = MockCollection(MockMongoDB(db_name), collection_name)
|
|
||||||
return mongo_collection
|
|
||||||
|
|
||||||
|
|
||||||
def pymongo_override():
|
|
||||||
global pymongo
|
|
||||||
import pymongo
|
|
||||||
if pymongo.MongoClient is not MockMongoClient:
|
|
||||||
pymongo.MongoClient = MockMongoClient
|
|
||||||
if pymongo.MongoReplicaSetClient is not MockMongoClient:
|
|
||||||
pymongo.MongoClient = MockMongoClient
|
|
||||||
|
|
||||||
|
|
||||||
class MyTransformer(mongo.BaseTransform):
|
|
||||||
"""Added here just to check manipulator logic is used correctly."""
|
|
||||||
|
|
||||||
def transform_incoming(self, son, collection):
|
|
||||||
return super(MyTransformer, self).transform_incoming(son, collection)
|
|
||||||
|
|
||||||
def transform_outgoing(self, son, collection):
|
|
||||||
return super(MyTransformer, self).transform_outgoing(son, collection)
|
|
||||||
|
|
||||||
|
|
||||||
class MongoCache(unit.BaseTestCase):
|
|
||||||
def setUp(self):
|
|
||||||
super(MongoCache, self).setUp()
|
|
||||||
global COLLECTIONS
|
|
||||||
COLLECTIONS = {}
|
|
||||||
mongo.MongoApi._DB = {}
|
|
||||||
mongo.MongoApi._MONGO_COLLS = {}
|
|
||||||
pymongo_override()
|
|
||||||
# using typical configuration
|
|
||||||
self.arguments = {
|
|
||||||
'db_hosts': 'localhost:27017',
|
|
||||||
'db_name': 'ks_cache',
|
|
||||||
'cache_collection': 'cache',
|
|
||||||
'username': 'test_user',
|
|
||||||
'password': 'test_password'
|
|
||||||
}
|
|
||||||
|
|
||||||
def test_missing_db_hosts(self):
|
|
||||||
self.arguments.pop('db_hosts')
|
|
||||||
region = dp_region.make_region()
|
|
||||||
self.assertRaises(exception.ValidationError, region.configure,
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
|
|
||||||
def test_missing_db_name(self):
|
|
||||||
self.arguments.pop('db_name')
|
|
||||||
region = dp_region.make_region()
|
|
||||||
self.assertRaises(exception.ValidationError, region.configure,
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
|
|
||||||
def test_missing_cache_collection_name(self):
|
|
||||||
self.arguments.pop('cache_collection')
|
|
||||||
region = dp_region.make_region()
|
|
||||||
self.assertRaises(exception.ValidationError, region.configure,
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
|
|
||||||
def test_incorrect_write_concern(self):
|
|
||||||
self.arguments['w'] = 'one value'
|
|
||||||
region = dp_region.make_region()
|
|
||||||
self.assertRaises(exception.ValidationError, region.configure,
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
|
|
||||||
def test_correct_write_concern(self):
|
|
||||||
self.arguments['w'] = 1
|
|
||||||
region = dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue10")
|
|
||||||
# There is no proxy so can access MongoCacheBackend directly
|
|
||||||
self.assertEqual(1, region.backend.api.w)
|
|
||||||
|
|
||||||
def test_incorrect_read_preference(self):
|
|
||||||
self.arguments['read_preference'] = 'inValidValue'
|
|
||||||
region = dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
# As per delayed loading of pymongo, read_preference value should
|
|
||||||
# still be string and NOT enum
|
|
||||||
self.assertEqual('inValidValue', region.backend.api.read_preference)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
self.assertRaises(ValueError, region.set,
|
|
||||||
random_key, "dummyValue10")
|
|
||||||
|
|
||||||
def test_correct_read_preference(self):
|
|
||||||
self.arguments['read_preference'] = 'secondaryPreferred'
|
|
||||||
region = dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
# As per delayed loading of pymongo, read_preference value should
|
|
||||||
# still be string and NOT enum
|
|
||||||
self.assertEqual('secondaryPreferred',
|
|
||||||
region.backend.api.read_preference)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue10")
|
|
||||||
|
|
||||||
# Now as pymongo is loaded so expected read_preference value is enum.
|
|
||||||
# There is no proxy so can access MongoCacheBackend directly
|
|
||||||
self.assertEqual(3, region.backend.api.read_preference)
|
|
||||||
|
|
||||||
def test_missing_replica_set_name(self):
|
|
||||||
self.arguments['use_replica'] = True
|
|
||||||
region = dp_region.make_region()
|
|
||||||
self.assertRaises(exception.ValidationError, region.configure,
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
|
|
||||||
def test_provided_replica_set_name(self):
|
|
||||||
self.arguments['use_replica'] = True
|
|
||||||
self.arguments['replicaset_name'] = 'my_replica'
|
|
||||||
dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
self.assertTrue(True) # reached here means no initialization error
|
|
||||||
|
|
||||||
def test_incorrect_mongo_ttl_seconds(self):
|
|
||||||
self.arguments['mongo_ttl_seconds'] = 'sixty'
|
|
||||||
region = dp_region.make_region()
|
|
||||||
self.assertRaises(exception.ValidationError, region.configure,
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
|
|
||||||
def test_cache_configuration_values_assertion(self):
|
|
||||||
self.arguments['use_replica'] = True
|
|
||||||
self.arguments['replicaset_name'] = 'my_replica'
|
|
||||||
self.arguments['mongo_ttl_seconds'] = 60
|
|
||||||
self.arguments['ssl'] = False
|
|
||||||
region = dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
# There is no proxy so can access MongoCacheBackend directly
|
|
||||||
self.assertEqual('localhost:27017', region.backend.api.hosts)
|
|
||||||
self.assertEqual('ks_cache', region.backend.api.db_name)
|
|
||||||
self.assertEqual('cache', region.backend.api.cache_collection)
|
|
||||||
self.assertEqual('test_user', region.backend.api.username)
|
|
||||||
self.assertEqual('test_password', region.backend.api.password)
|
|
||||||
self.assertEqual(True, region.backend.api.use_replica)
|
|
||||||
self.assertEqual('my_replica', region.backend.api.replicaset_name)
|
|
||||||
self.assertEqual(False, region.backend.api.conn_kwargs['ssl'])
|
|
||||||
self.assertEqual(60, region.backend.api.ttl_seconds)
|
|
||||||
|
|
||||||
def test_multiple_region_cache_configuration(self):
|
|
||||||
arguments1 = copy.copy(self.arguments)
|
|
||||||
arguments1['cache_collection'] = 'cache_region1'
|
|
||||||
|
|
||||||
region1 = dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=arguments1)
|
|
||||||
# There is no proxy so can access MongoCacheBackend directly
|
|
||||||
self.assertEqual('localhost:27017', region1.backend.api.hosts)
|
|
||||||
self.assertEqual('ks_cache', region1.backend.api.db_name)
|
|
||||||
self.assertEqual('cache_region1', region1.backend.api.cache_collection)
|
|
||||||
self.assertEqual('test_user', region1.backend.api.username)
|
|
||||||
self.assertEqual('test_password', region1.backend.api.password)
|
|
||||||
# Should be None because of delayed initialization
|
|
||||||
self.assertIsNone(region1.backend.api._data_manipulator)
|
|
||||||
|
|
||||||
random_key1 = uuid.uuid4().hex
|
|
||||||
region1.set(random_key1, "dummyValue10")
|
|
||||||
self.assertEqual("dummyValue10", region1.get(random_key1))
|
|
||||||
# Now should have initialized
|
|
||||||
self.assertIsInstance(region1.backend.api._data_manipulator,
|
|
||||||
mongo.BaseTransform)
|
|
||||||
|
|
||||||
class_name = '%s.%s' % (MyTransformer.__module__, "MyTransformer")
|
|
||||||
|
|
||||||
arguments2 = copy.copy(self.arguments)
|
|
||||||
arguments2['cache_collection'] = 'cache_region2'
|
|
||||||
arguments2['son_manipulator'] = class_name
|
|
||||||
|
|
||||||
region2 = dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=arguments2)
|
|
||||||
# There is no proxy so can access MongoCacheBackend directly
|
|
||||||
self.assertEqual('localhost:27017', region2.backend.api.hosts)
|
|
||||||
self.assertEqual('ks_cache', region2.backend.api.db_name)
|
|
||||||
self.assertEqual('cache_region2', region2.backend.api.cache_collection)
|
|
||||||
|
|
||||||
# Should be None because of delayed initialization
|
|
||||||
self.assertIsNone(region2.backend.api._data_manipulator)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region2.set(random_key, "dummyValue20")
|
|
||||||
self.assertEqual("dummyValue20", region2.get(random_key))
|
|
||||||
# Now should have initialized
|
|
||||||
self.assertIsInstance(region2.backend.api._data_manipulator,
|
|
||||||
MyTransformer)
|
|
||||||
|
|
||||||
region1.set(random_key1, "dummyValue22")
|
|
||||||
self.assertEqual("dummyValue22", region1.get(random_key1))
|
|
||||||
|
|
||||||
def test_typical_configuration(self):
|
|
||||||
|
|
||||||
dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
self.assertTrue(True) # reached here means no initialization error
|
|
||||||
|
|
||||||
def test_backend_get_missing_data(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
# should return NO_VALUE as key does not exist in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key))
|
|
||||||
|
|
||||||
def test_backend_set_data(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue")
|
|
||||||
self.assertEqual("dummyValue", region.get(random_key))
|
|
||||||
|
|
||||||
def test_backend_set_data_with_string_as_valid_ttl(self):
|
|
||||||
|
|
||||||
self.arguments['mongo_ttl_seconds'] = '3600'
|
|
||||||
region = dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
self.assertEqual(3600, region.backend.api.ttl_seconds)
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue")
|
|
||||||
self.assertEqual("dummyValue", region.get(random_key))
|
|
||||||
|
|
||||||
def test_backend_set_data_with_int_as_valid_ttl(self):
|
|
||||||
|
|
||||||
self.arguments['mongo_ttl_seconds'] = 1800
|
|
||||||
region = dp_region.make_region().configure('keystone.cache.mongo',
|
|
||||||
arguments=self.arguments)
|
|
||||||
self.assertEqual(1800, region.backend.api.ttl_seconds)
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue")
|
|
||||||
self.assertEqual("dummyValue", region.get(random_key))
|
|
||||||
|
|
||||||
def test_backend_set_none_as_data(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, None)
|
|
||||||
self.assertIsNone(region.get(random_key))
|
|
||||||
|
|
||||||
def test_backend_set_blank_as_data(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "")
|
|
||||||
self.assertEqual("", region.get(random_key))
|
|
||||||
|
|
||||||
def test_backend_set_same_key_multiple_times(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue")
|
|
||||||
self.assertEqual("dummyValue", region.get(random_key))
|
|
||||||
|
|
||||||
dict_value = {'key1': 'value1'}
|
|
||||||
region.set(random_key, dict_value)
|
|
||||||
self.assertEqual(dict_value, region.get(random_key))
|
|
||||||
|
|
||||||
region.set(random_key, "dummyValue2")
|
|
||||||
self.assertEqual("dummyValue2", region.get(random_key))
|
|
||||||
|
|
||||||
def test_backend_multi_set_data(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
random_key1 = uuid.uuid4().hex
|
|
||||||
random_key2 = uuid.uuid4().hex
|
|
||||||
random_key3 = uuid.uuid4().hex
|
|
||||||
mapping = {random_key1: 'dummyValue1',
|
|
||||||
random_key2: 'dummyValue2',
|
|
||||||
random_key3: 'dummyValue3'}
|
|
||||||
region.set_multi(mapping)
|
|
||||||
# should return NO_VALUE as key does not exist in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key))
|
|
||||||
self.assertFalse(region.get(random_key))
|
|
||||||
self.assertEqual("dummyValue1", region.get(random_key1))
|
|
||||||
self.assertEqual("dummyValue2", region.get(random_key2))
|
|
||||||
self.assertEqual("dummyValue3", region.get(random_key3))
|
|
||||||
|
|
||||||
def test_backend_multi_get_data(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
random_key1 = uuid.uuid4().hex
|
|
||||||
random_key2 = uuid.uuid4().hex
|
|
||||||
random_key3 = uuid.uuid4().hex
|
|
||||||
mapping = {random_key1: 'dummyValue1',
|
|
||||||
random_key2: '',
|
|
||||||
random_key3: 'dummyValue3'}
|
|
||||||
region.set_multi(mapping)
|
|
||||||
|
|
||||||
keys = [random_key, random_key1, random_key2, random_key3]
|
|
||||||
results = region.get_multi(keys)
|
|
||||||
# should return NO_VALUE as key does not exist in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, results[0])
|
|
||||||
self.assertEqual("dummyValue1", results[1])
|
|
||||||
self.assertEqual("", results[2])
|
|
||||||
self.assertEqual("dummyValue3", results[3])
|
|
||||||
|
|
||||||
def test_backend_multi_set_should_update_existing(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
random_key1 = uuid.uuid4().hex
|
|
||||||
random_key2 = uuid.uuid4().hex
|
|
||||||
random_key3 = uuid.uuid4().hex
|
|
||||||
mapping = {random_key1: 'dummyValue1',
|
|
||||||
random_key2: 'dummyValue2',
|
|
||||||
random_key3: 'dummyValue3'}
|
|
||||||
region.set_multi(mapping)
|
|
||||||
# should return NO_VALUE as key does not exist in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key))
|
|
||||||
self.assertEqual("dummyValue1", region.get(random_key1))
|
|
||||||
self.assertEqual("dummyValue2", region.get(random_key2))
|
|
||||||
self.assertEqual("dummyValue3", region.get(random_key3))
|
|
||||||
|
|
||||||
mapping = {random_key1: 'dummyValue4',
|
|
||||||
random_key2: 'dummyValue5'}
|
|
||||||
region.set_multi(mapping)
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key))
|
|
||||||
self.assertEqual("dummyValue4", region.get(random_key1))
|
|
||||||
self.assertEqual("dummyValue5", region.get(random_key2))
|
|
||||||
self.assertEqual("dummyValue3", region.get(random_key3))
|
|
||||||
|
|
||||||
def test_backend_multi_set_get_with_blanks_none(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
random_key1 = uuid.uuid4().hex
|
|
||||||
random_key2 = uuid.uuid4().hex
|
|
||||||
random_key3 = uuid.uuid4().hex
|
|
||||||
random_key4 = uuid.uuid4().hex
|
|
||||||
mapping = {random_key1: 'dummyValue1',
|
|
||||||
random_key2: None,
|
|
||||||
random_key3: '',
|
|
||||||
random_key4: 'dummyValue4'}
|
|
||||||
region.set_multi(mapping)
|
|
||||||
# should return NO_VALUE as key does not exist in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key))
|
|
||||||
self.assertEqual("dummyValue1", region.get(random_key1))
|
|
||||||
self.assertIsNone(region.get(random_key2))
|
|
||||||
self.assertEqual("", region.get(random_key3))
|
|
||||||
self.assertEqual("dummyValue4", region.get(random_key4))
|
|
||||||
|
|
||||||
keys = [random_key, random_key1, random_key2, random_key3, random_key4]
|
|
||||||
results = region.get_multi(keys)
|
|
||||||
|
|
||||||
# should return NO_VALUE as key does not exist in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, results[0])
|
|
||||||
self.assertEqual("dummyValue1", results[1])
|
|
||||||
self.assertIsNone(results[2])
|
|
||||||
self.assertEqual("", results[3])
|
|
||||||
self.assertEqual("dummyValue4", results[4])
|
|
||||||
|
|
||||||
mapping = {random_key1: 'dummyValue5',
|
|
||||||
random_key2: 'dummyValue6'}
|
|
||||||
region.set_multi(mapping)
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key))
|
|
||||||
self.assertEqual("dummyValue5", region.get(random_key1))
|
|
||||||
self.assertEqual("dummyValue6", region.get(random_key2))
|
|
||||||
self.assertEqual("", region.get(random_key3))
|
|
||||||
|
|
||||||
def test_backend_delete_data(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue")
|
|
||||||
self.assertEqual("dummyValue", region.get(random_key))
|
|
||||||
|
|
||||||
region.delete(random_key)
|
|
||||||
# should return NO_VALUE as key no longer exists in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key))
|
|
||||||
|
|
||||||
def test_backend_multi_delete_data(self):
|
|
||||||
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
random_key1 = uuid.uuid4().hex
|
|
||||||
random_key2 = uuid.uuid4().hex
|
|
||||||
random_key3 = uuid.uuid4().hex
|
|
||||||
mapping = {random_key1: 'dummyValue1',
|
|
||||||
random_key2: 'dummyValue2',
|
|
||||||
random_key3: 'dummyValue3'}
|
|
||||||
region.set_multi(mapping)
|
|
||||||
# should return NO_VALUE as key does not exist in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key))
|
|
||||||
self.assertEqual("dummyValue1", region.get(random_key1))
|
|
||||||
self.assertEqual("dummyValue2", region.get(random_key2))
|
|
||||||
self.assertEqual("dummyValue3", region.get(random_key3))
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get("InvalidKey"))
|
|
||||||
|
|
||||||
keys = mapping.keys()
|
|
||||||
|
|
||||||
region.delete_multi(keys)
|
|
||||||
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get("InvalidKey"))
|
|
||||||
# should return NO_VALUE as keys no longer exist in cache
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key1))
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key2))
|
|
||||||
self.assertEqual(api.NO_VALUE, region.get(random_key3))
|
|
||||||
|
|
||||||
def test_additional_crud_method_arguments_support(self):
|
|
||||||
"""Additional arguments should works across find/insert/update."""
|
|
||||||
|
|
||||||
self.arguments['wtimeout'] = 30000
|
|
||||||
self.arguments['j'] = True
|
|
||||||
self.arguments['continue_on_error'] = True
|
|
||||||
self.arguments['secondary_acceptable_latency_ms'] = 60
|
|
||||||
region = dp_region.make_region().configure(
|
|
||||||
'keystone.cache.mongo',
|
|
||||||
arguments=self.arguments
|
|
||||||
)
|
|
||||||
|
|
||||||
# There is no proxy so can access MongoCacheBackend directly
|
|
||||||
api_methargs = region.backend.api.meth_kwargs
|
|
||||||
self.assertEqual(30000, api_methargs['wtimeout'])
|
|
||||||
self.assertEqual(True, api_methargs['j'])
|
|
||||||
self.assertEqual(True, api_methargs['continue_on_error'])
|
|
||||||
self.assertEqual(60, api_methargs['secondary_acceptable_latency_ms'])
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue1")
|
|
||||||
self.assertEqual("dummyValue1", region.get(random_key))
|
|
||||||
|
|
||||||
region.set(random_key, "dummyValue2")
|
|
||||||
self.assertEqual("dummyValue2", region.get(random_key))
|
|
||||||
|
|
||||||
random_key = uuid.uuid4().hex
|
|
||||||
region.set(random_key, "dummyValue3")
|
|
||||||
self.assertEqual("dummyValue3", region.get(random_key))
|
|
@@ -168,7 +168,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,

     def load_backends(self):
         # ensure the cache region instance is setup
-        cache.configure_cache_region(cache.REGION)
+        cache.configure_cache()

         super(RestfulTestCase, self).load_backends()

@@ -32,9 +32,9 @@ from keystone.token import utils

 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='token')
-REVOCATION_MEMOIZE = cache.get_memoization_decorator(
-    section='token', expiration_section='revoke')
+MEMOIZE = cache.get_memoization_decorator(group='token')
+REVOCATION_MEMOIZE = cache.get_memoization_decorator(group='token',
+                                                     expiration_group='revoke')


 @dependency.requires('assignment_api', 'identity_api', 'resource_api',
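The section/expiration_section keywords become group/expiration_group. A hedged sketch of how such a decorator is typically applied, assuming only the signature shown in the hunk above (the decorated function and its body are illustrative, not the provider's actual code):

# Sketch only: assumes get_memoization_decorator() accepts the group and
# expiration_group keywords shown above; list_revoked_tokens is illustrative.
from keystone.common import cache

REVOCATION_MEMOIZE = cache.get_memoization_decorator(
    group='token', expiration_group='revoke')


@REVOCATION_MEMOIZE
def list_revoked_tokens():
    # Cached under the [token] caching switch; the TTL falls through to the
    # [revoke] group's cache_time when one is configured there.
    return []  # placeholder for the expensive lookup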
@@ -38,7 +38,7 @@ from keystone.token import utils

 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='token')
+MEMOIZE = cache.get_memoization_decorator(group='token')

 # NOTE(morganfainberg): This is for compatibility in case someone was relying
 # on the old location of the UnsupportedTokenVersionException for their code.
@@ -18,6 +18,7 @@ stevedore>=1.5.0 # Apache-2.0
 passlib>=1.6
 python-keystoneclient>=1.6.0
 keystonemiddleware>=2.0.0
+oslo.cache>=0.8.0 # Apache-2.0
 oslo.concurrency>=2.3.0 # Apache-2.0
 oslo.config>=2.3.0 # Apache-2.0
 oslo.context>=0.2.0 # Apache-2.0
tox.ini
@@ -20,33 +20,43 @@ deps = -r{toxinidir}/test-requirements.txt
 commands =
   nosetests --with-coverage --cover-package=keystone \
     keystone/tests/unit/auth/test_controllers.py \
+    keystone/tests/unit/backend/domain_config/test_sql.py \
+    keystone/tests/unit/backend/role/test_sql.py \
     keystone/tests/unit/catalog/test_core.py \
     keystone/tests/unit/common/test_injection.py \
     keystone/tests/unit/common/test_json_home.py \
+    keystone/tests/unit/common/test_manager.py \
     keystone/tests/unit/common/test_sql_core.py \
     keystone/tests/unit/common/test_utils.py \
+    keystone/tests/unit/contrib/federation/test_utils.py \
+    keystone/tests/unit/external/test_timeutils.py \
     keystone/tests/unit/test_auth_plugin.py \
-    keystone/tests/unit/test_backend.py \
     keystone/tests/unit/test_backend_endpoint_policy.py \
+    keystone/tests/unit/test_backend_endpoint_policy_sql.py \
+    keystone/tests/unit/test_backend_federation_sql.py \
+    keystone/tests/unit/test_backend_id_mapping_sql.py \
+    keystone/tests/unit/test_backend.py \
     keystone/tests/unit/test_backend_rules.py \
-    keystone/tests/unit/test_cache_backend_mongo.py \
     keystone/tests/unit/test_config.py \
+    keystone/tests/unit/test_contrib_ec2.py \
     keystone/tests/unit/test_contrib_s3_core.py \
     keystone/tests/unit/test_driver_hints.py \
     keystone/tests/unit/test_exception.py \
+    keystone/tests/unit/test_ipv6.py \
     keystone/tests/unit/test_policy.py \
     keystone/tests/unit/test_singular_plural.py \
     keystone/tests/unit/test_sql_livetest.py \
     keystone/tests/unit/test_sql_migrate_extensions.py \
     keystone/tests/unit/test_sql_upgrade.py \
     keystone/tests/unit/test_ssl.py \
+    keystone/tests/unit/tests/test_core.py \
+    keystone/tests/unit/tests/test_utils.py \
     keystone/tests/unit/test_token_bind.py \
     keystone/tests/unit/test_url_middleware.py \
+    keystone/tests/unit/test_v2_controller.py \
     keystone/tests/unit/test_v3_controller.py \
     keystone/tests/unit/test_validation.py \
     keystone/tests/unit/test_wsgi.py \
-    keystone/tests/unit/tests/test_core.py \
-    keystone/tests/unit/tests/test_utils.py \
     keystone/tests/unit/token/test_pki_provider.py \
     keystone/tests/unit/token/test_pkiz_provider.py \
     keystone/tests/unit/token/test_token_model.py \