Switch to oslo.cache lib

The common memorycache module was replaced by the
analogous tool from the oslo.cache library. The in-memory
cache was replaced by the oslo.cache.dict backend. Memcached
was replaced by the dogpile.cache.memcached backend.

Implements blueprint oslo-for-mitaka

Closes-Bug: #1483322
Co-Authored-By: Sergey Nikitin <snikitin@mirantis.com>
Co-Authored-By: Pavel Kholkin <pkholkin@mirantis.com>

Change-Id: I371f7a68e6a6c1c4cd101f61b9ad96c15187a80e
This commit is contained in:
Davanum Srinivas 2016-01-29 12:50:58 -05:00
parent 5eed75332f
commit 205fb7c8b3
17 changed files with 408 additions and 157 deletions

View File

@ -4,12 +4,13 @@ wrap_width = 79
namespace = nova
namespace = nova.conf
namespace = nova.api
namespace = nova.cache_utils
namespace = nova.cells
namespace = nova.compute
namespace = nova.network
namespace = nova.network.neutronv2
namespace = nova.virt
namespace = nova.openstack.common.memorycache
namespace = oslo.cache
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy

View File

@ -22,6 +22,7 @@ from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from nova import cache_utils
from nova import context
from nova import exception
from nova.i18n import _
@ -29,7 +30,6 @@ from nova.i18n import _LI
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import memorycache
LOG = logging.getLogger(__name__)
# NOTE(vish): cache mapping for one week
@ -42,13 +42,13 @@ def memoize(func):
def memoizer(context, reqid):
global _CACHE
if not _CACHE:
_CACHE = memorycache.get_client()
_CACHE = cache_utils.get_client(expiration_time=_CACHE_TIME)
key = "%s:%s" % (func.__name__, reqid)
key = str(key)
value = _CACHE.get(key)
if value is None:
value = func(context, reqid)
_CACHE.set(key, value, time=_CACHE_TIME)
_CACHE.set(key, value)
return value
return memoizer

View File

@ -26,13 +26,13 @@ import webob.dec
import webob.exc
from nova.api.metadata import base
from nova import cache_utils
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.network.neutronv2 import api as neutronapi
from nova.openstack.common import memorycache
from nova import utils
from nova import wsgi
@ -72,7 +72,8 @@ class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
self._cache = memorycache.get_client()
self._cache = cache_utils.get_client(
expiration_time=CONF.metadata_cache_expiration)
def get_metadata_by_remote_address(self, address):
if not address:
@ -90,7 +91,7 @@ class MetadataRequestHandler(wsgi.Application):
return None
if CONF.metadata_cache_expiration > 0:
self._cache.set(cache_key, data, CONF.metadata_cache_expiration)
self._cache.set(cache_key, data)
return data
@ -107,7 +108,7 @@ class MetadataRequestHandler(wsgi.Application):
return None
if CONF.metadata_cache_expiration > 0:
self._cache.set(cache_key, data, CONF.metadata_cache_expiration)
self._cache.set(cache_key, data)
return data
@ -254,7 +255,7 @@ class MetadataRequestHandler(wsgi.Application):
instance_id = instance_data['device_id']
tenant_id = instance_data['tenant_id']
# instance_data is unicode-encoded, while memorycache doesn't like
# instance_data is unicode-encoded, while cache_utils doesn't like
# that. Therefore we convert to str
if isinstance(instance_id, six.text_type):
instance_id = instance_id.encode('utf-8')

View File

@ -19,8 +19,8 @@ import collections
from oslo_config import cfg
from nova import cache_utils
from nova import objects
from nova.openstack.common import memorycache
# NOTE(vish): azs don't change that often, so cache them for an hour to
# avoid hitting the db multiple times on every request.
@ -44,7 +44,7 @@ def _get_cache():
global MC
if MC is None:
MC = memorycache.get_client()
MC = cache_utils.get_client(expiration_time=AZ_CACHE_SECONDS)
return MC
@ -113,7 +113,7 @@ def update_host_availability_zone_cache(context, host, availability_zone=None):
cache = _get_cache()
cache_key = _make_cache_key(host)
cache.delete(cache_key)
cache.set(cache_key, availability_zone, AZ_CACHE_SECONDS)
cache.set(cache_key, availability_zone)
def get_availability_zones(context, get_only_available=False,
@ -195,5 +195,5 @@ def get_instance_availability_zone(context, instance):
if not az:
elevated = context.elevated()
az = get_host_availability_zone(elevated, host)
cache.set(cache_key, az, AZ_CACHE_SECONDS)
cache.set(cache_key, az)
return az

174
nova/cache_utils.py Normal file
View File

@ -0,0 +1,174 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Super simple fake memcache client."""
import copy
from oslo_cache import core as cache
from oslo_config import cfg
from nova.i18n import _
# NOTE(dims): There are many copies of memcache_opts with memcached_servers
# in various projects as this used to be in a copy of memory_cache.py
# Since we are making a change in just our copy, oslo-config-generator fails
# with cfg.DuplicateOptError unless we override the comparison check
class _DeprecatedListOpt(cfg.ListOpt):
    """ListOpt whose inequality check ignores ``help`` and
    ``deprecated_for_removal``.

    oslo-config-generator raises cfg.DuplicateOptError when two
    registered options with the same name compare unequal; overriding
    ``__ne__`` to skip the two attributes that differ in this
    deprecated copy lets it coexist with other projects' copies of
    ``memcached_servers``.
    """

    def __ne__(self, another):
        # Deep-copy so popping keys below cannot mutate the real
        # option objects.
        self_dict = copy.deepcopy(vars(self))
        another_dict = copy.deepcopy(vars(another))
        # These are the only attributes expected to legitimately
        # differ between the deprecated copy and the original option.
        self_dict.pop('help')
        self_dict.pop('deprecated_for_removal')
        another_dict.pop('help')
        another_dict.pop('deprecated_for_removal')
        return self_dict != another_dict
# Deprecated old-style option kept for backward compatibility; see
# _DeprecatedListOpt for why a custom opt class is needed.
memcache_opts = [
    _DeprecatedListOpt('memcached_servers',
                       help='DEPRECATED: Memcached servers or None for in '
                            'process cache. "memcached_servers" opt is '
                            'deprecated in Mitaka. In Newton release '
                            'oslo.cache config options should be used as '
                            'this option will be removed. Please add a '
                            '[cache] group in your nova.conf file and '
                            'add "enable" and "memcache_servers" option in '
                            'this section.',
                       deprecated_for_removal=True),
]

CONF = cfg.CONF
CONF.register_opts(memcache_opts)

# Default TTL (in seconds) for old-style custom cache regions: one week.
WEEK = 604800
def list_opts():
    """Entry point for oslo-config-generator.

    Returns a deep copy so the generator cannot mutate the registered
    option objects.
    """
    return [(None, copy.deepcopy(memcache_opts))]
def get_memcached_client(expiration_time=0):
    """Used ONLY when memcached is explicitly needed.

    :param expiration_time: default TTL in seconds for cached entries
    :raises RuntimeError: when memcached servers are configured neither
        the old way ([DEFAULT]/memcached_servers) nor the new way
        ([cache]/memcache_servers with [cache]/enabled on)
    """
    # If the operator uses the old style [DEFAULT]/memcached_servers
    # then we just respect that setting
    if CONF.memcached_servers:
        return CacheClient(
            _get_custom_cache_region(expiration_time=expiration_time,
                                     backend='dogpile.cache.memcached',
                                     url=CONF.memcached_servers))
    # If the operator uses the new style [cache]/memcache_servers
    # and has the [cache]/enabled flag on then we let oslo_cache
    # configure the region from the configuration settings
    elif CONF.cache.enabled and CONF.cache.memcache_servers:
        return CacheClient(
            _get_default_cache_region(expiration_time=expiration_time))
    raise RuntimeError(_('memcached_servers not defined'))
def get_client(expiration_time=0):
    """Used to get a caching client.

    Backend selection order: the deprecated
    [DEFAULT]/memcached_servers setting wins, then an enabled [cache]
    section, and finally an in-process dictionary cache.

    :param expiration_time: default TTL in seconds for cached entries
    """
    # Old-style configuration takes precedence so existing deployments
    # keep working unchanged.
    if CONF.memcached_servers:
        region = _get_custom_cache_region(
            expiration_time=expiration_time,
            backend='dogpile.cache.memcached',
            url=CONF.memcached_servers)
    elif CONF.cache.enabled:
        # Let oslo_cache build the region from the [cache] settings.
        region = _get_default_cache_region(
            expiration_time=expiration_time)
    else:
        # Neither style is configured: fall back to the in-process
        # dictionary backend.
        region = _get_custom_cache_region(
            expiration_time=expiration_time,
            backend='oslo_cache.dict')
    return CacheClient(region)
def _get_default_cache_region(expiration_time):
    """Build a cache region from the [cache] configuration section.

    :param expiration_time: TTL in seconds; 0 keeps the configured
        (or oslo.cache default) expiration time
    """
    region = cache.create_region()
    if expiration_time != 0:
        # NOTE(review): this mutates the process-wide
        # CONF.cache.expiration_time, not just this region --
        # presumably acceptable while nova builds one default region;
        # confirm before reusing this helper with differing TTLs.
        CONF.cache.expiration_time = expiration_time
    cache.configure_cache_region(CONF, region)
    return region
def _get_custom_cache_region(expiration_time=WEEK,
                             backend=None,
                             url=None):
    """Create instance of oslo_cache client.

    For backends you can pass specific parameters by kwargs.
    For 'dogpile.cache.memcached' backend 'url' parameter must be specified.

    :param backend: backend name
    :param expiration_time: interval in seconds to indicate maximum
        time-to-live value for each key
    :param url: memcached url(s)
    """
    # Only the two backends used by the old-style configuration are
    # supported here.
    if backend == 'oslo_cache.dict':
        backend_arguments = {'expiration_time': expiration_time}
    elif backend == 'dogpile.cache.memcached':
        backend_arguments = {'url': url}
    else:
        raise RuntimeError(_('old style configuration can use '
                             'only dictionary or memcached backends'))
    configure_kwargs = {'arguments': backend_arguments}
    if expiration_time != 0:
        configure_kwargs['expiration_time'] = expiration_time
    region = cache.create_region()
    region.configure(backend, **configure_kwargs)
    return region
class CacheClient(object):
    """Replicates a tiny subset of memcached client interface."""

    def __init__(self, region):
        """:param region: configured oslo.cache/dogpile region that
            performs the actual storage.
        """
        self.region = region

    def get(self, key):
        """Return the cached value for ``key``, or None on a miss."""
        value = self.region.get(key)
        # Compare by identity with the NO_VALUE sentinel: this matches
        # get_multi() below and avoids relying on a cached object's
        # (possibly permissive) __eq__.
        if value is cache.NO_VALUE:
            return None
        return value

    def get_or_create(self, key, creator):
        """Return the cached value, creating it via ``creator`` on a miss."""
        return self.region.get_or_create(key, creator)

    def set(self, key, value):
        """Store ``value`` under ``key``; returns the backend's result."""
        return self.region.set(key, value)

    def add(self, key, value):
        """Store ``value`` only if ``key`` is not already cached.

        Returns the existing value when present, otherwise ``value``.
        """
        return self.region.get_or_create(key, lambda: value)

    def delete(self, key):
        """Remove ``key`` from the cache."""
        return self.region.delete(key)

    def get_multi(self, keys):
        """Return cached values for ``keys``, with None for misses."""
        values = self.region.get_multi(keys)
        return [None if value is cache.NO_VALUE else value for value in
                values]

    def delete_multi(self, keys):
        """Remove every key in ``keys`` from the cache."""
        return self.region.delete_multi(keys)

View File

@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_cache import core as cache
from oslo_config import cfg
from oslo_db import options
from oslo_log import log
@ -53,6 +54,7 @@ def parse_args(argv, default_config_files=None, configure_db=True):
options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova')
cache.configure(CONF)
debugger.register_cli_opts()
CONF(argv[1:],
project='nova',

View File

@ -23,12 +23,12 @@ from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova import cache_utils
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import rpcapi as compute_rpcapi
from nova.i18n import _LI, _LW
from nova import manager
from nova import objects
from nova.openstack.common import memorycache
LOG = logging.getLogger(__name__)
@ -52,17 +52,30 @@ class ConsoleAuthManager(manager.Manager):
def __init__(self, scheduler_driver=None, *args, **kwargs):
super(ConsoleAuthManager, self).__init__(service_name='consoleauth',
*args, **kwargs)
self.mc = memorycache.get_client()
self._mc = None
self._mc_instance = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
@property
def mc(self):
if self._mc is None:
self._mc = cache_utils.get_client(CONF.console_token_ttl)
return self._mc
@property
def mc_instance(self):
if self._mc_instance is None:
self._mc_instance = cache_utils.get_client()
return self._mc_instance
def reset(self):
LOG.info(_LI('Reloading compute RPC API'))
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def _get_tokens_for_instance(self, instance_uuid):
tokens_str = self.mc.get(instance_uuid.encode('UTF-8'))
tokens_str = self.mc_instance.get(instance_uuid.encode('UTF-8'))
if not tokens_str:
tokens = []
else:
@ -86,17 +99,19 @@ class ConsoleAuthManager(manager.Manager):
# We need to log the warning message if the token is not cached
# successfully, because the failure will cause the console for
# instance to not be usable.
if not self.mc.set(token.encode('UTF-8'),
data, CONF.console_token_ttl):
if not self.mc.set(token.encode('UTF-8'), data):
LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
{'token': token})
tokens = self._get_tokens_for_instance(instance_uuid)
# Remove the expired tokens from cache.
tokens = [tok for tok in tokens if self.mc.get(tok.encode('UTF-8'))]
token_values = self.mc.get_multi(
[tok.encode('UTF-8') for tok in tokens])
tokens = [name for name, value in zip(tokens, token_values)
if value is not None]
tokens.append(token)
if not self.mc.set(instance_uuid.encode('UTF-8'),
if not self.mc_instance.set(instance_uuid.encode('UTF-8'),
jsonutils.dumps(tokens)):
LOG.warning(_LW("Instance: %(instance_uuid)s failed to save "
"into memcached"),
@ -136,6 +151,6 @@ class ConsoleAuthManager(manager.Manager):
def delete_tokens_for_instance(self, context, instance_uuid):
tokens = self._get_tokens_for_instance(instance_uuid)
for token in tokens:
self.mc.delete(token.encode('UTF-8'))
self.mc.delete(instance_uuid.encode('UTF-8'))
self.mc.delete_multi(
[tok.encode('UTF-8') for tok in tokens])
self.mc_instance.delete(instance_uuid.encode('UTF-8'))

View File

@ -1,97 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Super simple fake memcache client."""
import copy
from oslo_config import cfg
from oslo_utils import timeutils
# Old-style option: a list of memcached servers, or unset to use the
# in-process fake client below.
memcache_opts = [
    cfg.ListOpt('memcached_servers',
                help='Memcached servers or None for in process cache.'),
]

CONF = cfg.CONF
CONF.register_opts(memcache_opts)


def list_opts():
    """Entry point for oslo-config-generator."""
    # Deep copy so the generator cannot mutate the registered options.
    return [(None, copy.deepcopy(memcache_opts))]
def get_client(memcached_servers=None):
    """Return a memcache client, real or fake.

    Uses python-memcached when servers are configured (via the argument
    or [DEFAULT]/memcached_servers); otherwise returns the in-process
    fake ``Client`` defined below.
    """
    client_cls = Client
    if not memcached_servers:
        memcached_servers = CONF.memcached_servers
    if memcached_servers:
        # Imported lazily so the dependency is only required when
        # memcached is actually configured.
        import memcache
        client_cls = memcache.Client
    return client_cls(memcached_servers, debug=0)
class Client(object):
    """Replicates a tiny subset of memcached client interface."""

    def __init__(self, *args, **kwargs):
        """Ignores the passed in args."""
        # Maps key -> (absolute expiry timestamp, value); an expiry of
        # 0 means the entry never expires.
        self.cache = {}

    def get(self, key):
        """Retrieves the value for a key or None.

        This expunges expired keys during each get.
        """
        now = timeutils.utcnow_ts()
        for k in list(self.cache):
            (timeout, _value) = self.cache[k]
            if timeout and now >= timeout:
                del self.cache[k]
        return self.cache.get(key, (0, None))[1]

    def set(self, key, value, time=0, min_compress_len=0):
        """Sets the value for a key."""
        # 'min_compress_len' is accepted for interface compatibility
        # with python-memcached and ignored.
        timeout = 0
        if time != 0:
            # Store an absolute expiry timestamp; 0 keeps the entry
            # forever.
            timeout = timeutils.utcnow_ts() + time
        self.cache[key] = (timeout, value)
        return True

    def add(self, key, value, time=0, min_compress_len=0):
        """Sets the value for a key if it doesn't exist."""
        if self.get(key) is not None:
            return False
        return self.set(key, value, time, min_compress_len)

    def incr(self, key, delta=1):
        """Increments the value for a key."""
        value = self.get(key)
        if value is None:
            return None
        new_value = int(value) + delta
        # Stored as str to mirror memcached, which keeps counters as
        # byte strings; the original expiry timestamp is preserved.
        self.cache[key] = (self.cache[key][0], str(new_value))
        return new_value

    def delete(self, key, time=0):
        """Deletes the value associated with a key."""
        # 'time' is accepted for interface compatibility and ignored.
        if key in self.cache:
            del self.cache[key]

View File

@ -21,8 +21,8 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from nova import cache_utils
from nova.i18n import _, _LI, _LW
from nova.openstack.common import memorycache
from nova.servicegroup import api
from nova.servicegroup.drivers import base
@ -37,9 +37,8 @@ LOG = logging.getLogger(__name__)
class MemcachedDriver(base.Driver):
def __init__(self, *args, **kwargs):
if not CONF.memcached_servers:
raise RuntimeError(_('memcached_servers not defined'))
self.mc = memorycache.get_client()
self.mc = cache_utils.get_memcached_client(
expiration_time=CONF.service_down_time)
def join(self, member_id, group_id, service=None):
"""Join the given service with its group."""
@ -77,8 +76,7 @@ class MemcachedDriver(base.Driver):
# set(..., time=CONF.service_down_time) uses it and
# reduces key-deleting code.
self.mc.set(str(key),
timeutils.utcnow(),
time=CONF.service_down_time)
timeutils.utcnow())
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):

View File

@ -32,6 +32,7 @@ import mock
import os
import fixtures
from oslo_cache import core as cache
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
@ -60,6 +61,7 @@ CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v21')
logging.register_options(CONF)
CONF.set_override('use_stderr', False)
logging.setup(CONF, 'nova')
cache.configure(CONF)
_TRUE_VALUES = ('True', 'true', '1', 'yes')

View File

@ -58,9 +58,9 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.manager_api.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',
self.instance_uuid)
self.assertTrue(self.manager_api.check_token(self.context, token))
self.assertIsNotNone(self.manager_api.check_token(self.context, token))
timeutils.advance_time_seconds(1)
self.assertFalse(self.manager_api.check_token(self.context, token))
self.assertIsNone(self.manager_api.check_token(self.context, token))
def _stub_validate_console_port(self, result):
def fake_validate_console_port(ctxt, instance, port, console_type):
@ -84,7 +84,8 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.instance_uuid)
for token in tokens:
self.assertTrue(self.manager_api.check_token(self.context, token))
self.assertIsNotNone(
self.manager_api.check_token(self.context, token))
def test_delete_tokens_for_instance(self):
tokens = [u"token" + str(i) for i in range(10)]
@ -100,7 +101,8 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.assertEqual(len(stored_tokens), 0)
for token in tokens:
self.assertFalse(self.manager_api.check_token(self.context, token))
self.assertIsNone(
self.manager_api.check_token(self.context, token))
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_wrong_token_has_port(self, mock_get):
@ -113,7 +115,7 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.manager_api.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',
instance_uuid=self.instance_uuid)
self.assertFalse(self.manager_api.check_token(self.context, token))
self.assertIsNone(self.manager_api.check_token(self.context, token))
def test_delete_expired_tokens(self):
self.useFixture(test.TimeOverride())
@ -126,7 +128,7 @@ class ConsoleauthTestCase(test.NoDBTestCase):
'127.0.0.1', '8080', 'host',
self.instance_uuid)
timeutils.advance_time_seconds(1)
self.assertFalse(self.manager_api.check_token(self.context, token))
self.assertIsNone(self.manager_api.check_token(self.context, token))
token1 = u'mytok2'
self.manager_api.authorize_console(self.context, token1, 'novnc',
@ -148,18 +150,31 @@ class ControlauthMemcacheEncodingTestCase(test.NoDBTestCase):
self.u_instance = u"instance"
def test_authorize_console_encoding(self):
self.mox.StubOutWithMock(self.manager.mc, "set")
self.mox.StubOutWithMock(self.manager.mc, "get")
self.manager.mc.set(mox.IsA(str), mox.IgnoreArg(), mox.IgnoreArg()
).AndReturn(True)
self.manager.mc.get(mox.IsA(str)).AndReturn(None)
self.manager.mc.set(mox.IsA(str), mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
self.manager.authorize_console(self.context, self.u_token, 'novnc',
'127.0.0.1', '8080', 'host',
self.u_instance)
with test.nested(
mock.patch.object(self.manager.mc_instance,
'set', return_value=True),
mock.patch.object(self.manager.mc_instance,
'get', return_value='["token"]'),
mock.patch.object(self.manager.mc,
'set', return_value=True),
mock.patch.object(self.manager.mc,
'get', return_value=None),
mock.patch.object(self.manager.mc,
'get_multi', return_value=["token1"]),
) as (
mock_instance_set,
mock_instance_get,
mock_set,
mock_get,
mock_get_multi):
self.manager.authorize_console(self.context, self.u_token,
'novnc', '127.0.0.1', '8080',
'host', self.u_instance)
mock_set.assert_has_calls([mock.call('token', mock.ANY)])
mock_instance_get.assert_has_calls([mock.call('instance')])
mock_get_multi.assert_has_calls([mock.call(['token'])])
mock_instance_set.assert_has_calls(
[mock.call('instance', mock.ANY)])
def test_check_token_encoding(self):
self.mox.StubOutWithMock(self.manager.mc, "get")
@ -170,15 +185,25 @@ class ControlauthMemcacheEncodingTestCase(test.NoDBTestCase):
self.manager.check_token(self.context, self.u_token)
def test_delete_tokens_for_instance_encoding(self):
self.mox.StubOutWithMock(self.manager.mc, "delete")
self.mox.StubOutWithMock(self.manager.mc, "get")
self.manager.mc.get(mox.IsA(str)).AndReturn('["token"]')
self.manager.mc.delete(mox.IsA(str)).AndReturn(True)
self.manager.mc.delete(mox.IsA(str)).AndReturn(True)
self.mox.ReplayAll()
self.manager.delete_tokens_for_instance(self.context, self.u_instance)
with test.nested(
mock.patch.object(self.manager.mc_instance,
'get', return_value='["token"]'),
mock.patch.object(self.manager.mc_instance,
'delete', return_value=True),
mock.patch.object(self.manager.mc,
'get'),
mock.patch.object(self.manager.mc,
'delete_multi', return_value=True),
) as (
mock_instance_get,
mock_instance_delete,
mock_get,
mock_delete_multi):
self.manager.delete_tokens_for_instance(self.context,
self.u_instance)
mock_instance_get.assert_has_calls([mock.call('instance')])
mock_instance_delete.assert_has_calls([mock.call('instance')])
mock_delete_multi.assert_has_calls([mock.call(['token'])])
class CellsConsoleauthTestCase(ConsoleauthTestCase):

View File

@ -23,7 +23,7 @@ from nova import test
class MemcachedServiceGroupTestCase(test.NoDBTestCase):
@mock.patch('nova.openstack.common.memorycache.get_client')
@mock.patch('nova.cache_utils.get_memcached_client')
def setUp(self, mgc_mock):
super(MemcachedServiceGroupTestCase, self).setUp()
self.mc_client = mock.MagicMock()
@ -63,4 +63,4 @@ class MemcachedServiceGroupTestCase(test.NoDBTestCase):
fn = self.servicegroup_api._driver._report_state
fn(service)
self.mc_client.set.assert_called_once_with('compute:fake-host',
mock.ANY, time=60)
mock.ANY)

View File

@ -0,0 +1,121 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import cache_utils
from nova import test
class TestOsloCache(test.NoDBTestCase):
    """Unit tests for the nova.cache_utils region/client factories."""

    def test_get_default_cache_region(self):
        region = cache_utils._get_default_cache_region(expiration_time=60)
        # An explicit TTL overrides the oslo.cache default.
        self.assertEqual(60, region.expiration_time)
        self.assertIsNotNone(region)

    def test_get_default_cache_region_default_expiration_time(self):
        region = cache_utils._get_default_cache_region(expiration_time=0)
        # default oslo.cache expiration_time value 600 was taken
        self.assertEqual(600, region.expiration_time)
        self.assertIsNotNone(region)

    @mock.patch('dogpile.cache.region.CacheRegion.configure')
    def test_get_client(self, mock_cacheregion):
        # No configuration at all -> in-process dictionary backend.
        self.assertIsNotNone(
            cache_utils.get_client(expiration_time=60))

        # Old-style [DEFAULT]/memcached_servers -> memcached backend.
        self.flags(memcached_servers=['localhost:11211'])
        self.assertIsNotNone(
            cache_utils.get_client(expiration_time=60))

        # New-style [cache]/enabled -> oslo.cache-configured region.
        self.flags(memcached_servers=None)
        self.flags(group='cache', enabled=True)
        self.assertIsNotNone(
            cache_utils.get_client(expiration_time=60))

        # Everything off again -> dictionary backend, region exposed.
        self.flags(memcached_servers=None)
        self.flags(group='cache', enabled=False)
        client = cache_utils.get_client(expiration_time=60)
        self.assertIsNotNone(client.region)

        # NOTE: call order mirrors the four get_client() calls above.
        mock_cacheregion.assert_has_calls(
            [mock.call('oslo_cache.dict',
                       arguments={'expiration_time': 60},
                       expiration_time=60),
             mock.call('dogpile.cache.memcached',
                       arguments={'url': ['localhost:11211']},
                       expiration_time=60),
             mock.call('dogpile.cache.null',
                       _config_argument_dict=mock.ANY,
                       _config_prefix='cache.oslo.arguments.',
                       expiration_time=60,
                       wrap=None),
             mock.call('oslo_cache.dict',
                       arguments={'expiration_time': 60},
                       expiration_time=60)],
        )

    @mock.patch('dogpile.cache.region.CacheRegion.configure')
    def test_get_custom_cache_region(self, mock_cacheregion):
        # Unknown/omitted backend is rejected.
        self.assertRaises(RuntimeError,
                          cache_utils._get_custom_cache_region)
        self.assertIsNotNone(
            cache_utils._get_custom_cache_region(
                backend='oslo_cache.dict'))
        self.assertIsNotNone(
            cache_utils._get_custom_cache_region(
                backend='dogpile.cache.memcached',
                url=['localhost:11211']))
        # 604800 is the WEEK default TTL from cache_utils.
        mock_cacheregion.assert_has_calls(
            [mock.call('oslo_cache.dict',
                       arguments={'expiration_time': 604800},
                       expiration_time=604800),
             mock.call('dogpile.cache.memcached',
                       arguments={'url': ['localhost:11211']},
                       expiration_time=604800)]
        )

    @mock.patch('dogpile.cache.region.CacheRegion.configure')
    def test_get_memcached_client(self, mock_cacheregion):
        # Neither configuration style set -> RuntimeError.
        self.flags(memcached_servers=None)
        self.flags(group='cache', enabled=False)
        self.assertRaises(
            RuntimeError,
            cache_utils.get_memcached_client,
            expiration_time=60)

        # Old-style configuration works.
        self.flags(memcached_servers=['localhost:11211'])
        self.assertIsNotNone(
            cache_utils.get_memcached_client(expiration_time=60))

        self.flags(memcached_servers=['localhost:11211'])
        self.assertIsNotNone(
            cache_utils.get_memcached_client(expiration_time=60))

        # New-style [cache] configuration works too.
        self.flags(memcached_servers=None)
        self.flags(group='cache', enabled=True)
        self.flags(group='cache', memcache_servers=['localhost:11211'])
        self.assertIsNotNone(
            cache_utils.get_memcached_client(expiration_time=60))

        mock_cacheregion.assert_has_calls(
            [mock.call('dogpile.cache.memcached',
                       arguments={'url': ['localhost:11211']},
                       expiration_time=60),
             mock.call('dogpile.cache.memcached',
                       arguments={'url': ['localhost:11211']},
                       expiration_time=60),
             mock.call('dogpile.cache.null',
                       _config_argument_dict=mock.ANY,
                       _config_prefix='cache.oslo.arguments.',
                       expiration_time=60, wrap=None)]
        )

View File

@ -3,7 +3,6 @@
# The list of modules to copy from oslo-incubator
module=cliutils
module=imageutils
module=memorycache
# The base module to hold the copy of openstack.common
base=nova

View File

@ -0,0 +1,9 @@
---
prelude: >
deprecations:
- Option ``memcached_servers`` is deprecated in Mitaka. Operators should
use oslo.cache configuration instead. Specifically, the ``enabled`` option
under the ``[cache]`` section should be set to ``True`` and the url(s) for
the memcached servers should be set in the ``[cache]/memcache_servers``
option.

View File

@ -34,6 +34,7 @@ six>=1.9.0 # MIT
stevedore>=1.5.0 # Apache-2.0
setuptools>=16.0 # PSF/ZPL
websockify>=0.6.1 # LGPLv3
oslo.cache>=0.8.0 # Apache-2.0
oslo.concurrency>=2.3.0 # Apache-2.0
oslo.config>=3.4.0 # Apache-2.0
oslo.context>=0.2.0 # Apache-2.0

View File

@ -34,7 +34,7 @@ oslo.config.opts =
nova.network = nova.network.opts:list_opts
nova.network.neutronv2 = nova.network.neutronv2.api:list_opts
nova.virt = nova.virt.opts:list_opts
nova.openstack.common.memorycache = nova.openstack.common.memorycache:list_opts
nova.cache_utils = nova.cache_utils:list_opts
nova.compute.monitors.cpu =
virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor